The following OpenGL code is deliberately very GPU-heavy, which forces the CPU to wait while the GPU finishes its work. Specifically, it waits at the glFinish() call, where the CPU spends a measured 99.87% of each frame. The program runs at 10 fps on my system (Windows 10, GTX 1070, VSync disabled). All of this would be expected, except that while the CPU is supposed to be waiting, it inexplicably burns 100% CPU time, causing overheating.
After testing on 6 systems with Intel GPUs, 4 with AMD GPUs, and 5 with NVIDIA GPUs, only the NVIDIA systems show the problem. All I can conclude so far is that it is NVIDIA-specific and OpenGL-specific: DirectX apps don't exhibit it, and it can even be reproduced in Firefox by running a GPU-maxing WebGL page with ANGLE disabled (it doesn't happen with ANGLE enabled).
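For reference, a rough sketch of how the spinning can be confirmed (an illustration, not my exact measurement harness): compare the wall-clock time spent inside glFinish() against the CPU time the process burns over the same interval; if the two nearly match, the driver is spinning instead of sleeping. Needs #include <stdio.h> in addition to the includes below.
static unsigned long long ft100(FILETIME ft) { // FILETIME -> 100 ns ticks
    ULARGE_INTEGER u; u.LowPart = ft.dwLowDateTime; u.HighPart = ft.dwHighDateTime;
    return u.QuadPart;
}
/* around the glFinish() call in the render loop: */
FILETIME ct, et, k0, u0, k1, u1;
LARGE_INTEGER freq, t0, t1;
QueryPerformanceFrequency(&freq);
GetProcessTimes(GetCurrentProcess(), &ct, &et, &k0, &u0);
QueryPerformanceCounter(&t0);
glFinish();
QueryPerformanceCounter(&t1);
GetProcessTimes(GetCurrentProcess(), &ct, &et, &k1, &u1);
double wall_ms = 1000.0 * (t1.QuadPart - t0.QuadPart) / freq.QuadPart;
double cpu_ms = (ft100(k1) - ft100(k0) + ft100(u1) - ft100(u0)) / 10000.0; // kernel + user time
printf("glFinish: %.2f ms wall, %.2f ms cpu\n", wall_ms, cpu_ms);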
I compile with the following:
C:\mingw64\bin\x86_64-w64-mingw32-gcc.exe %~dp0\main.c -o %~dp0\main.exe -static-libgcc -std=c11 -ggdb -O2 -Wall -BC:\mingw64\bin\ -LC:\mingw64\lib\ -IC:\mingw64\include\ -lgdi32 -lopengl32
Minimal code (I suggest tweaking the fragment shader's loop count so that you also land around 10 fps; it makes the problem more apparent):
#include <windows.h>
#include <GL/gl.h>
typedef signed long long int GLsizeiptr;
typedef char GLchar;
#define GL_ARRAY_BUFFER 0x8892
#define GL_DYNAMIC_DRAW 0x88E8
#define GL_FRAGMENT_SHADER 0x8B30
#define GL_VERTEX_SHADER 0x8B31
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) {
return DefWindowProc(hwnd, msg, wParam, lParam);
}
int main() {
HDC hdc;
{
WNDCLASS wc;
memset(&wc, 0, sizeof(wc));
wc.style = CS_OWNDC;
wc.lpfnWndProc = WndProc;
wc.lpszClassName = "gldemo";
if (!RegisterClass(&wc)) return 0;
HWND hwnd = CreateWindow("gldemo", "Demo", WS_POPUP, 0, 0, 1920/2, 1080/2, 0, 0, NULL, 0);
hdc = GetDC(hwnd);
const PIXELFORMATDESCRIPTOR pfd = {0,0, PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER ,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
SetPixelFormat(hdc, ChoosePixelFormat(hdc, &pfd), &pfd);
wglMakeCurrent(hdc, wglCreateContext(hdc));
ShowWindow(hwnd, SW_SHOW);
}
void (*glGenBuffers)(GLsizei, GLuint *) = (void*)wglGetProcAddress("glGenBuffers");
void (*glBindBuffer)(GLenum, GLuint) = (void*)wglGetProcAddress("glBindBuffer");
void (*glBufferData)(GLenum, GLsizeiptr, void *, GLenum) = (void*)wglGetProcAddress("glBufferData");
GLuint (*glCreateShader)(GLuint) = (void*)wglGetProcAddress("glCreateShader");
void (*glAttachShader)(GLuint, GLuint) = (void*)wglGetProcAddress("glAttachShader");
void (*glCompileShader)(GLuint) = (void*)wglGetProcAddress("glCompileShader");
void (*glShaderSource)(GLuint, GLuint, const char **, const GLint *) = (void*)wglGetProcAddress("glShaderSource");
void (*glEnableVertexAttribArray)(GLuint) = (void*)wglGetProcAddress("glEnableVertexAttribArray");
GLuint (*glGetAttribLocation)(GLuint, GLchar *) = (void*)wglGetProcAddress("glGetAttribLocation");
void (*glVertexAttribPointer)(GLuint, GLint, GLenum, GLboolean, GLsizei, void *) = (void*)wglGetProcAddress("glVertexAttribPointer");
GLuint (*glCreateProgram)() = (void*)wglGetProcAddress("glCreateProgram");
void (*glLinkProgram)(GLuint) = (void*)wglGetProcAddress("glLinkProgram");
void (*glUseProgram)(GLuint) = (void*)wglGetProcAddress("glUseProgram");
const char *g_vertCode =
"#version 420\n"
"in vec3 vertexPosition;"
"void main() {gl_Position = vec4(vertexPosition.xyz, 1.);}";
const char *g_fragCode =
"#version 420\n"
"void main() {"
"float res = 0.5;"
"for (int t=0;t<58000;t++) {" // tweak this to make sure you're outputting ~10 fps. 58000 is ok for a gtx 1070.
"res = fract(sin(dot(gl_FragCoord.xy+res, vec2(12.9898,78.233))) * 43758.5453);"
"}"
"gl_FragColor = vec4(vec3(res)*0.4, 1.0);"
"}";
GLuint prog = glCreateProgram();
GLuint vertshader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertshader, 1, &g_vertCode, 0);
glCompileShader(vertshader);
glAttachShader(prog, vertshader);
GLuint fragshader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragshader, 1, &g_fragCode, 0);
glCompileShader(fragshader);
glAttachShader(prog, fragshader);
glLinkProgram(prog);
glUseProgram(prog);
GLuint attribVertexPosition = glGetAttribLocation(prog, "vertexPosition");
float verts[4*3] = {1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f};
GLuint vboId;
glGenBuffers(1, &vboId);
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, 4*3*4, verts, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(attribVertexPosition);
glVertexAttribPointer(attribVertexPosition, 3, GL_FLOAT, 0, 12, (void*)0);
for (;;) {
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
//__asm__("int $3");
glFinish(); // Halts here with 100% cpu on my system.
SwapBuffers(hdc);
MSG msg; char done = 0;
while (PeekMessage(&msg, 0, 0, 0, PM_REMOVE)) {
if (msg.message == WM_QUIT) done = 1;
TranslateMessage(&msg);
DispatchMessage(&msg);
}
if (done) break;
}
return 0;
}
This isn't a real answer, so I won't mark it as one, but I can provide a workaround for now.
The idea is that glFlush guarantees the GPU will start processing the submitted commands, so we can, on paper, insert a wait after glFlush and before glFinish to cut down on the amount of time glFinish spends frying the CPU.
This is only viable if the resolution of the waiting function is at worst 1 ms. For Sleep, or any other Windows API function with a tweakable timeout, that resolution can be obtained by calling timeBeginPeriod(1) at the start of the program.
To make sure the wait doesn't overshoot, which would let the GPU go idle, the wait used for the next frame is computed as the time just spent waiting, plus the residual time spent in glFinish, minus 0.5 milliseconds.
The result of all this is that the GPU stays well fed at 100% usage according to Task Manager, yet CPU utilization only starts to rise significantly once the time vanilla glFinish would have spent drops below 4 ms, and the mechanism disengages entirely once that drops below roughly 1.5 ms.
In an actual implementation one might consider moving the SwapBuffers call inside the timed region, to include in the calculation whatever frame-limiting systems might be in place, and perhaps using waitable timers instead of Sleep.
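If you want to try waitable timers, here is a rough sketch of what I mean; wait_us and timer_init are names I made up for illustration (and on recent Windows 10 builds, CreateWaitableTimerExW additionally accepts a CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag):
static HANDLE g_timer;
void timer_init(void) { // call once at startup, e.g. next to timeBeginPeriod(1)
    g_timer = CreateWaitableTimer(NULL, TRUE, NULL); // manual-reset timer
}
void wait_us(long long microseconds) {
    LARGE_INTEGER due;
    due.QuadPart = -(microseconds * 10); // negative means relative time, in 100 ns units
    SetWaitableTimer(g_timer, &due, 0, NULL, NULL, FALSE);
    WaitForSingleObject(g_timer, INFINITE);
}
The render loop below would then call "if (waitfor > 0) wait_us(waitfor);" instead of "if (waitfor > 0) Sleep(waitfor/1000);".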
Compiling with:
C:\mingw64\bin\x86_64-w64-mingw32-gcc.exe %~dp0\main.c -o %~dp0\main.exe -static-libgcc -std=c11 -ggdb -O2 -Wall -BC:\mingw64\bin\ -LC:\mingw64\lib\ -IC:\mingw64\include\ -lgdi32 -lopengl32 -lwinmm
Updated code:
#include <windows.h>
#include <stdio.h>
#include <GL/gl.h>
typedef signed long long int GLsizeiptr;
typedef char GLchar;
#define GL_ARRAY_BUFFER 0x8892
#define GL_DYNAMIC_DRAW 0x88E8
#define GL_FRAGMENT_SHADER 0x8B30
#define GL_VERTEX_SHADER 0x8B31
LARGE_INTEGER Frequency;
unsigned long long int elapsedMicroseconds(LARGE_INTEGER StartingTime) {
LARGE_INTEGER EndingTime;
QueryPerformanceCounter(&EndingTime);
return (1000000*(EndingTime.QuadPart - StartingTime.QuadPart))/Frequency.QuadPart;
}
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) {
return DefWindowProc(hwnd, msg, wParam, lParam);
}
int main() {
QueryPerformanceFrequency(&Frequency);
timeBeginPeriod(1);
HDC hdc;
{
WNDCLASS wc;
memset(&wc, 0, sizeof(wc));
wc.style = CS_OWNDC;
wc.lpfnWndProc = WndProc;
wc.lpszClassName = "gldemo";
if (!RegisterClass(&wc)) return 0;
HWND hwnd = CreateWindow("gldemo", "Demo", WS_POPUP, 0, 0, 1920/2, 1080/2, 0, 0, NULL, 0);
hdc = GetDC(hwnd);
const PIXELFORMATDESCRIPTOR pfd = {0,0, PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER ,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
SetPixelFormat(hdc, ChoosePixelFormat(hdc, &pfd), &pfd);
wglMakeCurrent(hdc, wglCreateContext(hdc));
ShowWindow(hwnd, SW_SHOW);
}
void (*glGenBuffers)(GLsizei, GLuint *) = (void*)wglGetProcAddress("glGenBuffers");
void (*glBindBuffer)(GLenum, GLuint) = (void*)wglGetProcAddress("glBindBuffer");
void (*glBufferData)(GLenum, GLsizeiptr, void *, GLenum) = (void*)wglGetProcAddress("glBufferData");
GLuint (*glCreateShader)(GLuint) = (void*)wglGetProcAddress("glCreateShader");
void (*glAttachShader)(GLuint, GLuint) = (void*)wglGetProcAddress("glAttachShader");
void (*glCompileShader)(GLuint) = (void*)wglGetProcAddress("glCompileShader");
void (*glShaderSource)(GLuint, GLuint, const char **, const GLint *) = (void*)wglGetProcAddress("glShaderSource");
void (*glEnableVertexAttribArray)(GLuint) = (void*)wglGetProcAddress("glEnableVertexAttribArray");
GLuint (*glGetAttribLocation)(GLuint, GLchar *) = (void*)wglGetProcAddress("glGetAttribLocation");
void (*glVertexAttribPointer)(GLuint, GLint, GLenum, GLboolean, GLsizei, void *) = (void*)wglGetProcAddress("glVertexAttribPointer");
GLuint (*glCreateProgram)() = (void*)wglGetProcAddress("glCreateProgram");
void (*glLinkProgram)(GLuint) = (void*)wglGetProcAddress("glLinkProgram");
void (*glUseProgram)(GLuint) = (void*)wglGetProcAddress("glUseProgram");
const char *g_vertCode =
"#version 420\n"
"in vec3 vertexPosition;"
"void main() {gl_Position = vec4(vertexPosition.xyz, 1.);}";
const char *g_fragCode =
"#version 420\n"
"void main() {"
"float res = 0.5;"
"for (int t=0;t<58000;t++) {" // tweak this to make sure you're outputting ~10 fps. 58000 is ok for a gtx 1070.
"res = fract(sin(dot(gl_FragCoord.xy+res, vec2(12.9898,78.233))) * 43758.5453);"
"}"
"gl_FragColor = vec4(vec3(res)*0.4, 1.0);"
"}";
GLuint prog = glCreateProgram();
GLuint vertshader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertshader, 1, &g_vertCode, 0);
glCompileShader(vertshader);
glAttachShader(prog, vertshader);
GLuint fragshader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragshader, 1, &g_fragCode, 0);
glCompileShader(fragshader);
glAttachShader(prog, fragshader);
glLinkProgram(prog);
glUseProgram(prog);
GLuint attribVertexPosition = glGetAttribLocation(prog, "vertexPosition");
float verts[4*3] = {1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f};
GLuint vboId;
glGenBuffers(1, &vboId);
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, 4*3*4, verts, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(attribVertexPosition);
glVertexAttribPointer(attribVertexPosition, 3, GL_FLOAT, 0, 12, (void*)0);
LARGE_INTEGER syncer;
long long int waitfor = 0;
for (;;) {
//__asm__("int $3");
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glFlush();
QueryPerformanceCounter(&syncer);
if (waitfor>0) Sleep(waitfor/1000);
glFinish();
waitfor = elapsedMicroseconds(syncer)-500;
SwapBuffers(hdc);
MSG msg; char done = FALSE;
while (PeekMessage(&msg, 0, 0, 0, PM_REMOVE)) {
if (msg.message == WM_QUIT) done = TRUE;
TranslateMessage(&msg);
DispatchMessage(&msg);
}
if (done) break;
}
return 0;
}
Related
As the title says: how do I draw a 3D ice cream? I have drawn a cone and a ball, but the ball doesn't sit in the cone. I've tried many ways, but the ball always ends up either entirely behind the cone or entirely in front of it. Can anyone assist me with this? I have followed my lecturer's notes but still can't get it to work.
#include <Windows.h>
#include <gl/GL.h>
#include <math.h>
#include <time.h>
#include <gl/GLU.h>
#pragma comment (lib, "OpenGL32.lib")
#pragma comment (lib, "GLU32.lib")
#define WINDOW_TITLE "OpenGL Window"
LRESULT WINAPI WindowProcedure(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
switch (msg)
{
case WM_DESTROY:
PostQuitMessage(0);
break;
default:
break;
}
return DefWindowProc(hWnd, msg, wParam, lParam);
}
//--------------------------------------------------------------------
bool initPixelFormat(HDC hdc)
{
PIXELFORMATDESCRIPTOR pfd;
ZeroMemory(&pfd, sizeof(PIXELFORMATDESCRIPTOR));
pfd.cAlphaBits = 8;
pfd.cColorBits = 32;
pfd.cDepthBits = 24;
pfd.cStencilBits = 0;
pfd.dwFlags = PFD_DOUBLEBUFFER | PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW;
pfd.iLayerType = PFD_MAIN_PLANE;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
pfd.nVersion = 1;
// ChoosePixelFormat returns the index of the most similar available pixel format
int n = ChoosePixelFormat(hdc, &pfd);
// SetPixelFormat returns whether it successfully set the pixel format
if (SetPixelFormat(hdc, n, &pfd))
{
return true;
}
else
{
return false;
}
}
//--------------------------------------------------------------------
void display()
{
glPushMatrix();
glRotatef(120, 1.0, 0, 0);
GLUquadricObj * cylinder = NULL;
cylinder = gluNewQuadric();
glColor3f(1, 0, 0);
gluQuadricDrawStyle(cylinder, GLU_FILL);
gluCylinder(cylinder, 0.52, 0.0, 2.0, 30, 20);
gluDeleteQuadric(cylinder);
GLUquadricObj * sphere = NULL;
sphere = gluNewQuadric();
glColor3f(1, 1, 1);
gluQuadricDrawStyle(sphere, GLU_LINE);
gluSphere(sphere, 0.5, 20, 20);
gluDeleteQuadric(sphere);
glPopMatrix();
}
//--------------------------------------------------------------------
int WINAPI WinMain(HINSTANCE hInst, HINSTANCE, LPSTR, int nCmdShow)
{
WNDCLASSEX wc;
ZeroMemory(&wc, sizeof(WNDCLASSEX));
wc.cbSize = sizeof(WNDCLASSEX);
wc.hInstance = GetModuleHandle(NULL);
wc.lpfnWndProc = WindowProcedure;
wc.lpszClassName = WINDOW_TITLE;
wc.style = CS_HREDRAW | CS_VREDRAW;
if (!RegisterClassEx(&wc)) return false;
HWND hWnd = CreateWindow(WINDOW_TITLE, WINDOW_TITLE, WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT, CW_USEDEFAULT, 800, 640,
NULL, NULL, wc.hInstance, NULL);
//--------------------------------
// Initialize window for OpenGL
//--------------------------------
HDC hdc = GetDC(hWnd);
// initialize pixel format for the window
initPixelFormat(hdc);
// get an openGL context
HGLRC hglrc = wglCreateContext(hdc);
// make context current
if (!wglMakeCurrent(hdc, hglrc)) return false;
//--------------------------------
// End initialization
//--------------------------------
ShowWindow(hWnd, nCmdShow);
MSG msg;
ZeroMemory(&msg, sizeof(msg));
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-2.0f, +3.0f, -2.0f, +2.0f, -10.0f, +10.0f);
while (true)
{
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
{
if (msg.message == WM_QUIT) break;
TranslateMessage(&msg);
DispatchMessage(&msg);
}
display();
SwapBuffers(hdc);
}
UnregisterClass(WINDOW_TITLE, wc.hInstance);
return true;
}
//--------------------------------------------------------------------
Note that drawing with glBegin/glEnd sequences, the fixed-function matrix stack, and the fixed-function per-vertex lighting model has been deprecated for decades.
Read about the Fixed Function Pipeline and see Vertex Specification and Shader for a state-of-the-art way of rendering.
Anyway, in the PIXELFORMATDESCRIPTOR the depth buffer is properly specified:
pfd.cDepthBits = 24;
Now you have to actually use the depth buffer.
Side note: the number of color buffer bits should be 24 instead of 32; see the documentation of cColorBits:
Specifies the number of color bitplanes in each color buffer. For RGBA pixel types, it is the size of the color buffer, excluding the alpha bitplanes. For color-index pixels, it is the size of the color-index buffer.
To use the depth buffer, the depth test has to be enabled with glEnable(GL_DEPTH_TEST).
Further, the color buffer and the depth buffer of the default framebuffer have to be cleared at the beginning of every frame with glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT):
void display()
{
glEnable( GL_DEPTH_TEST );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glPushMatrix();
glRotatef(120, 1.0, 0, 0);
GLUquadricObj * cylinder = NULL;
cylinder = gluNewQuadric();
glColor3f(1, 0.5, 0);
gluQuadricDrawStyle(cylinder, GLU_FILL);
gluCylinder(cylinder, 0.52, 0.0, 2.0, 30, 20);
gluDeleteQuadric(cylinder);
GLUquadricObj * sphere = NULL;
sphere = gluNewQuadric();
glColor3f(1, 1, 0.5);
gluQuadricDrawStyle(sphere, GLU_FILL);
gluSphere(sphere, 0.5, 20, 20);
gluDeleteQuadric(sphere);
glPopMatrix();
}
See the preview, where I changed the gluQuadricDrawStyle for the sphere from GLU_LINE to GLU_FILL:
Below I've included glfw-quick-test.c, which is basically copied verbatim from http://www.glfw.org/docs/latest/quick.html, except for the removed use of glad, an added background color, a couple of defines so it compiles on Ubuntu 14.04 (64-bit), and a preprocessor #ifdef switch (macro DO_OPENGL_THREE) to change the requested OpenGL version.
When I compile with:
gcc -g -o glfw-quick-test.exe glfw-quick-test.c -lglfw -lGLU -lGL -lm
./glfw-quick-test.exe
... then I get the message "GLFW Requesting OpenGL 2.1" and drawing is fine:
When I compile with:
gcc -g -DDO_OPENGL_THREE -o glfw-quick-test.exe glfw-quick-test.c -lglfw -lGLU -lGL -lm
./glfw-quick-test.exe
... then I get the message "GLFW Requesting OpenGL 3.2" and the rotating triangle is not drawn at all - only the background color is preserved:
Can anyone explain why this happens? Can I somehow get GLFW3 to draw even when OpenGL 3.2 is requested, and if so, how?
(I'm aware that the original source code says "// NOTE: OpenGL error checks have been omitted for brevity", but I'm not sure what sort of error checks I should add to see what the problem with the OpenGL 3.2 drawing would be...)
The code, glfw-quick-test.c (EDIT: now with error checking):
// http://www.glfw.org/docs/latest/quick.html
#define UBUNTU14
#ifdef UBUNTU14 // assume Ubuntu 14.04
// strange; Ubuntu 14 GLFW/glfw3.h doesn't have GLFW_TRUE, GLFW_FALSE, mentions GL_TRUE GL_FALSE
#define GLFW_TRUE GL_TRUE
#define GLFW_FALSE GL_FALSE
#endif
//~ #include <glad/glad.h> // "GL/GLES/EGL/GLX/WGL Loader-Generator based on the official specs."
#include <GLFW/glfw3.h>
#include "linmath.h"
#include <stdlib.h>
#include <stdio.h>
static const struct
{
float x, y;
float r, g, b;
} vertices[3] =
{
{ -0.6f, -0.4f, 1.f, 0.f, 0.f },
{ 0.6f, -0.4f, 0.f, 1.f, 0.f },
{ 0.f, 0.6f, 0.f, 0.f, 1.f }
};
static const char* vertex_shader_text =
"uniform mat4 MVP;\n"
"attribute vec3 vCol;\n"
"attribute vec2 vPos;\n"
"varying vec3 color;\n"
"void main()\n"
"{\n"
" gl_Position = MVP * vec4(vPos, 0.0, 1.0);\n"
" color = vCol;\n"
"}\n";
static const char* fragment_shader_text =
"varying vec3 color;\n"
"void main()\n"
"{\n"
" gl_FragColor = vec4(color, 1.0);\n"
"}\n";
static void error_callback(int error, const char* description)
{
fprintf(stderr, "Error: %s\n", description);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GLFW_TRUE);
}
void checkGLerrors(char *label) {
// check OpenGL error
GLenum err;
while ((err = glGetError()) != GL_NO_ERROR) {
char* errorstr = "";
switch(err) {
case GL_INVALID_OPERATION: errorstr="INVALID_OPERATION"; break;
case GL_INVALID_ENUM: errorstr="INVALID_ENUM"; break;
case GL_INVALID_VALUE: errorstr="INVALID_VALUE"; break;
case GL_OUT_OF_MEMORY: errorstr="OUT_OF_MEMORY"; break;
case GL_INVALID_FRAMEBUFFER_OPERATION: errorstr="INVALID_FRAMEBUFFER_OPERATION"; break;
}
printf("OpenGL error ('%s'): %d %s\n", label, err, errorstr);
}
}
int main(void)
{
GLFWwindow* window;
GLuint vertex_buffer, vertex_shader, fragment_shader, program;
GLint mvp_location, vpos_location, vcol_location;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
// NB: Ubuntu will not draw if 3.2 (just black screen) - only if 2.0 or 2.1?
#ifdef DO_OPENGL_THREE
printf("GLFW Requesting OpenGL 3.2\n");
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // only 3.2+
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE); //only 3.0+
glfwWindowHint(GLFW_RESIZABLE, GL_TRUE); // https://stackoverflow.com/q/23834680/
#else
printf("GLFW Requesting OpenGL 2.1\n");
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2); // 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1); // 0);
#endif
checkGLerrors("post hint");
window = glfwCreateWindow(640, 480, "Simple example", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
checkGLerrors("post glfwCreateWindow");
glfwSetKeyCallback(window, key_callback);
glfwMakeContextCurrent(window);
//~ gladLoadGLLoader((GLADloadproc) glfwGetProcAddress);
glfwSwapInterval(1);
// NOTE: OpenGL error checks have been omitted for brevity
glGenBuffers(1, &vertex_buffer);
checkGLerrors("post glGenBuffers");
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
glCompileShader(vertex_shader);
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
glCompileShader(fragment_shader);
program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
glLinkProgram(program);
checkGLerrors("post glLinkProgram");
mvp_location = glGetUniformLocation(program, "MVP");
vpos_location = glGetAttribLocation(program, "vPos");
vcol_location = glGetAttribLocation(program, "vCol");
checkGLerrors("post gl locations");
glEnableVertexAttribArray(vpos_location);
checkGLerrors("post gl EnableVertexAttribArray");
glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
sizeof(float) * 5, (void*) 0);
checkGLerrors("post glVertexAttribPointer");
glEnableVertexAttribArray(vcol_location);
checkGLerrors("post glEnableVertexAttribArray");
glVertexAttribPointer(vcol_location, 3, GL_FLOAT, GL_FALSE,
sizeof(float) * 5, (void*) (sizeof(float) * 2));
checkGLerrors("post glVertexAttribPointer");
while (!glfwWindowShouldClose(window))
{
float ratio;
int width, height;
mat4x4 m, p, mvp;
glfwGetFramebufferSize(window, &width, &height);
ratio = width / (float) height;
glViewport(0, 0, width, height);
glClearColor(0.784314, 0.780392, 0.305882, 1.0); // add background color
glClear(GL_COLOR_BUFFER_BIT);
mat4x4_identity(m);
mat4x4_rotate_Z(m, m, (float) glfwGetTime());
mat4x4_ortho(p, -ratio, ratio, -1.f, 1.f, 1.f, -1.f);
mat4x4_mul(mvp, p, m);
glUseProgram(program);
glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) mvp);
glDrawArrays(GL_TRIANGLES, 0, 3);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Thanks to @BDL and his comment about VAOs (vertex array objects), I found How to use VBOs without VAOs with OpenGL core profile? - and by trying out things from there, I found that the only change needed to the above code, so that drawing shows up under OpenGL 3.2, is this:
...
//~ gladLoadGLLoader((GLADloadproc) glfwGetProcAddress);
glfwSwapInterval(1);
#ifdef DO_OPENGL_THREE
// https://stackoverflow.com/a/30057424/277826:
// "You can however just create and bind a VAO and forget about it (keep it bound)."
GLuint VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
#endif
// NOTE: OpenGL error checks have been omitted for brevity
glGenBuffers(1, &vertex_buffer);
...
Once this section with the VAO is in, there are no more OpenGL errors printed, and drawing works. That makes sense in hindsight: the core profile has no default vertex array object, so vertex-attribute setup and draw calls issued without a bound VAO fail with GL_INVALID_OPERATION.
I'm using OpenGL 3 and GLEW in order to draw a triangle. I have a window (changing the background color works fine), but I can't get my shader to draw on it. I did some tests like:
glGetProgramiv(shader_programme, GL_LINK_STATUS, &isLinked);
printf("\nProg : %i",isLinked);
And it's fine; the print returns 1 for the program, the vertex shader, and the fragment shader.
I suppose I missed a clear somewhere, but I'm not sure, and I'm also pretty lost here...
This is my code:
#include "../include/scop.h"
#include <OpenGL/gl.h>
#include ".../lfw3/3.2.1/include/GLFW/glfw3.h"
t_scop *ft_init_window(t_scop *scop, t_parse parse)
{
if (!glfwInit())
ft_putstr("error init");
else
{
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
WIN = glfwCreateWindow(WIN_X, WIN_Y, "Scop", NULL, NULL);
glfwMakeContextCurrent(WIN);
glfwSetInputMode(WIN, GLFW_STICKY_KEYS, GL_TRUE);
glfwSetInputMode(WIN, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
glfwPollEvents();
glfwSetCursorPos(WIN, WIN_X / 2.0, WIN_Y / 2.0);
glClearColor(0.0f, 0.5f, 0.4f, 0.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
}
float points[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
//init buffer and fill it
GLuint vbo = 0;
glGenBuffers (1, &vbo);
glBindBuffer (GL_ARRAY_BUFFER, vbo);
glBufferData (GL_ARRAY_BUFFER, 9 * sizeof (float), points, GL_STATIC_DRAW);
//init VertexArray
GLuint vao = 0;
glGenVertexArraysAPPLE (1, &vao);
glBindVertexArrayAPPLE (vao);
glEnableVertexAttribArray (0);
glBindBuffer (GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer (0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(GL_TRIANGLES, 0, 3);
const char* vertex_shader =
"#version 330 core\n"
"layout (location = 0) in vec3 position;\n"
"void main () {"
"gl_Position.xyz = position;"
"gl_Position.w = 1.0;"
"}\0";
const char* fragment_shader =
"#version 330 core\n"
"out vec3 color;"
"void main () {"
"color = vec3(1,0,0);"
"}\0";
//create vertex
GLuint vs = glCreateShader (GL_VERTEX_SHADER);
glShaderSource (vs, 1, &vertex_shader, NULL);
glCompileShader (vs);
//tests
GLint success = 0;
glGetShaderiv(vs, GL_COMPILE_STATUS, &success);
printf ("Taille du source:%i\n", success);
if (GL_FALSE == success)
printf("false");
else printf("true");
//create frag
GLuint fs = glCreateShader (GL_FRAGMENT_SHADER);
glShaderSource (fs, 1, &fragment_shader, NULL);
glCompileShader (fs);
//tests
success = 0;
glGetShaderiv(fs, GL_COMPILE_STATUS, &success);
printf("Taille fs : %i",success);
// GLuint shader_programme = LoadShaders (vs,fs);
GLint shader_programme = glCreateProgram ();
glAttachShader (shader_programme, vs);
glAttachShader (shader_programme, fs);
glLinkProgram (shader_programme);
//tests
GLint isLinked = 0;
glGetProgramiv(shader_programme, GL_LINK_STATUS, &isLinked);
printf("\nProg : %i",isLinked);
//idk if i need to do this now
glDetachShader(shader_programme, vs);
glDetachShader(shader_programme, fs);
glDeleteShader(vs);
glDeleteShader(fs);
glGetError();
while (!glfwWindowShouldClose(WIN))
{
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearDepth(GL_DEPTH_TEST);
glUseProgram (shader_programme);
glBindVertexArrayAPPLE (vao);
glDrawArrays (GL_TRIANGLES, 0, 3);
//glUseProgram(0); ???
glfwPollEvents ();
glBindVertexArrayAPPLE (0);
glfwSwapBuffers(WIN);
}
// glfwTerminate();
return (scop);
}
Any help is greatly appreciated!
The problem lies in this line:
glClearDepth(GL_DEPTH_TEST);
glClearDepth (doc) specifies the value the depth buffer is cleared with and expects a floating-point value between 0 and 1. It is similar to glClearColor, just for the depth buffer.
Additionally, you should be using the core-profile VAO functions instead of the ones from the APPLE extension. The APPLE extension should only be used with an OpenGL context <= 2.1.
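Putting both fixes together, here is a rough sketch of the corrected setup and render loop, reusing the names from your code (vbo, shader_programme, WIN) and assuming the 3.2 core context created above:
glClearDepth(1.0); // depth clear value, must be in [0, 1]; 1.0 is the far plane
// core-profile VAO functions, not the APPLE-suffixed variants:
GLuint vao = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
while (!glfwWindowShouldClose(WIN))
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // glClear does the actual clearing
    glUseProgram(shader_programme);
    glBindVertexArray(vao);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glfwSwapBuffers(WIN);
    glfwPollEvents();
}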
The code compiles but crashes when run.
I think it is VertexShaderSource and FragmentShaderSource that are causing the problem, but I can't seem to find it.
I have been trying to solve this for days now, so your help would be much appreciated.
If the following lines of code are removed, the program seems to run as intended.
GLuint RedShader = CreateShader();
glUseProgram(RedShader);
Here is the command line used to compile.
gcc -g ..\Source\main.c ..\Source\Window.c ..\Source\OpenGL.c ..\Source\Shader.c -lopengl32 -lgdi32 -o ..\Binary\Project.exe
Here is a snippet from the GDB console:
Program received signal SIGSEGV, Segmentation fault.
0x00000000 in ?? ()
(gdb) where
0x00000000 in ?? ()
0x00401991 in CreateShader (VertexShaderSource=0x26f018, FragmentShaderSource=0x26f014) at ..\Source\Shader.c:5
0x00401432 in main () at ..\Source\Main.c:12
Here is the Code:
Main.c
#include "..\Header\Window.h"
#include "..\Header\Shader.h"
int main()
{
Project_CreateWindow("Project", 512, 512);
float Vertices[] = {-0.5f, -0.5f, 0.0f, 0.5f, -0.5f, 0.0f, 0.0f, 0.5f, 0.0f};
GLuint VBO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
GLuint RedShader = CreateShader();
glUseProgram(RedShader);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
for(;;)
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 3);
Project_UpdateWindow();
}
glDisableVertexAttribArray(0);
Project_DestroyWindow();
return 0;
}
Window.h
#ifndef WINDOW_HEADER
#define WINDOW_HEADER
#include <windows.h>
#include <GL\GL.h>
#include "..\Header\OpenGL.h"
void Project_CreateWindow(const char * Title, const unsigned short Width, const unsigned short Height);
void Project_DestroyWindow();
void Project_UpdateWindow();
#endif
Window.c
#include "..\Header\Window.h"
static HDC DeviceContext;
static HWND Window;
static HGLRC RenderContext;
LRESULT CALLBACK EventHandler(HWND Window, UINT Message, WPARAM WordParameter, LPARAM LongParameter);
void Project_CreateWindow(const char * Title, const unsigned short Width, const unsigned short Height)
{
WNDCLASSEX WindowClass = {sizeof(WNDCLASSEX), CS_OWNDC, EventHandler, 0, 0, GetModuleHandle(0), 0, 0, 0, 0, "WindowClass", 0};
RegisterClassEx(&WindowClass);
Window = CreateWindowEx(0, WindowClass.lpszClassName, Title, WS_OVERLAPPEDWINDOW, (GetSystemMetrics(SM_CXSCREEN) / 2) - (Width / 2), (GetSystemMetrics(SM_CYSCREEN) / 2) - (Height / 2), Width, Height, 0, 0, WindowClass.hInstance, 0);
PIXELFORMATDESCRIPTOR PixelFormat = {sizeof(PIXELFORMATDESCRIPTOR), 1, PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0};
if ((DeviceContext = GetDC(Window)) == 0)
{
//TODO: Device Context Creation Failed
}
SetPixelFormat(DeviceContext, ChoosePixelFormat(DeviceContext, &PixelFormat), &PixelFormat);
if ((RenderContext = wglCreateContext(DeviceContext)) == 0)
{
//TODO: Render Context Creation Failed
}
if ((wglMakeCurrent(DeviceContext, RenderContext)) == 0)
{
//TODO: Make Context Current Failed
}
ShowWindow(Window, SW_SHOW);
GetOpenGLFunctionPointers();
}
void Project_DestroyWindow()
{
wglMakeCurrent(0, 0);
wglDeleteContext(RenderContext);
ReleaseDC(Window, DeviceContext);
DestroyWindow(Window);
}
void Project_UpdateWindow()
{
MSG Message;
if (PeekMessage(&Message, 0, 0, 0, PM_REMOVE))
{
TranslateMessage(&Message);
DispatchMessage(&Message);
}
SwapBuffers(DeviceContext);
}
LRESULT CALLBACK EventHandler(HWND Window, UINT Message, WPARAM WordParameter, LPARAM LongParameter)
{
return DefWindowProc(Window, Message, WordParameter, LongParameter);
}
OpenGL.h
#ifndef OPENGL_HEADER
#define OPENGL_HEADER
#include <stddef.h>
#include <GL\GL.h>
#include <windows.h>
#include <Wingdi.h>
#define GL_ARRAY_BUFFER 0x8892
#define GL_STATIC_DRAW 0x88E4
#define GL_VERTEX_SHADER 0x8B31
#define GL_FRAGMENT_SHADER 0x8B30
typedef char GLchar;
typedef ptrdiff_t GLsizeiptr;
extern void (* glGenBuffers) (GLsizei n, GLuint *buffers);
extern void (* glBindBuffer) (GLenum target, GLuint buffer);
extern void (* glBufferData) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
extern void (* glEnableVertexAttribArray) (GLuint index);
extern void (* glVertexAttribPointer) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
extern void (* glDisableVertexAttribArray) (GLuint index);
extern GLuint (* glCreateProgram) (void);
extern GLuint (* glCreateShader) (GLenum type);
extern void (* glShaderSource) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
extern void (* glCompileShader) (GLuint shader);
extern void (* glAttachShader) (GLuint program, GLuint shader);
extern void (* glLinkProgram) (GLuint program);
extern void (* glUseProgram) (GLuint program);
void GetOpenGLFunctionPointers();
#endif
OpenGL.c
#include "..\Header\OpenGL.h"
#include <stdio.h>
void *GetOpenGLFunctionAddress(const char *name);
void (* glGenBuffers) (GLsizei n, GLuint *buffers);
void (* glBindBuffer) (GLenum target, GLuint buffer);
void (* glBufferData) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
void (* glEnableVertexAttribArray) (GLuint index);
void (* glVertexAttribPointer) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
void (* glDisableVertexAttribArray) (GLuint index);
GLuint (* glCreateProgram) (void);
GLuint (* glCreateShader) (GLenum type);
void (* glShaderSource) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
void (* glCompileShader) (GLuint shader);
void (* glAttachShader) (GLuint program, GLuint shader);
void (* glLinkProgram) (GLuint program);
void (* glUseProgram) (GLuint program);
void GetOpenGLFunctionPointers()
{
glGenBuffers = (void *)GetOpenGLFunctionAddress("glGenBuffers");
glBindBuffer = (void *)GetOpenGLFunctionAddress("glBindBuffer");
glBufferData = (void *)GetOpenGLFunctionAddress("glBufferData");
glEnableVertexAttribArray = (void *)GetOpenGLFunctionAddress("glEnableVertexAttribArray");
glVertexAttribPointer = (void *)GetOpenGLFunctionAddress("glVertexAttribPointer");
glDisableVertexAttribArray = (void *)GetOpenGLFunctionAddress("glDisableVertexAttribArray");
glCreateProgram = (void *)GetOpenGLFunctionAddress("glCreateProgram");
glCreateShader = (void *)GetOpenGLFunctionAddress("glCreateShader");
glShaderSource = (void *)GetOpenGLFunctionAddress("glShaderSource");
glCompileShader = (void *)GetOpenGLFunctionAddress("glCompileShader");
glAttachShader = (void *)GetOpenGLFunctionAddress("glAttachShader");
glLinkProgram = (void *)GetOpenGLFunctionAddress("glLinkProgram");
glUseProgram = (void *)GetOpenGLFunctionAddress("glUseProgram");
}
void *GetOpenGLFunctionAddress(const char *FunctionName)
{
void *FunctionPointer = (void *)wglGetProcAddress(FunctionName);
if(FunctionPointer == 0 || (FunctionPointer == (void*)0x1) || (FunctionPointer == (void*)0x2) || (FunctionPointer == (void*)0x3) || (FunctionPointer == (void*)-1))
{
HMODULE Module = LoadLibraryA("opengl32.dll");
FunctionPointer = (void *)GetProcAddress(Module, FunctionName);
if(FunctionPointer == 0 || (FunctionPointer == (void*)0x1) || (FunctionPointer == (void*)0x2) || (FunctionPointer == (void*)0x3) || (FunctionPointer == (void*)-1))
{
printf("%s %i", "Invalid OpenGL Function Pointer, Last Error:", GetLastError());
}
}
return FunctionPointer;
}
Shader.h
#ifndef SHADER_HEADER
#define SHADER_HEADER
#include "..\Header\OpenGL.h"
GLuint CreateShader();
#endif
Shader.c
#include "..\Header\Shader.h"
GLuint CreateShader()
{
const GLchar * VertexShaderSource = "#version 330 core\n layout(location = 0) in vec3 vertexPosition_modelspace;\n void main(){\n gl_Position.xyz = vertexPosition_modelspace;\n gl_Position.w = 1.0;\n }";
const GLchar * FragmentShaderSource = "#version 330 core\n out vec3 color;\n void main(){\n color = vec3(1.0f, 0.55f, 0.0f);\n }";
GLuint ShaderProgram = glCreateProgram();
GLuint VertexShader = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(VertexShader, 1, &VertexShaderSource, 0);
glShaderSource(FragmentShader, 1, &FragmentShaderSource, 0);
glCompileShader(VertexShader);
glCompileShader(FragmentShader);
glAttachShader(ShaderProgram, VertexShader);
glAttachShader(ShaderProgram, FragmentShader);
glLinkProgram(ShaderProgram);
return ShaderProgram;
}
You forgot the calling convention in the function-pointer declarations. For example:
extern void (* glGenBuffers) (GLsizei n, GLuint *buffers);
should be changed to:
extern void (__stdcall * glGenBuffers) (GLsizei n, GLuint *buffers);
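The reason this only blows up at run time: on 32-bit Windows the OpenGL entry points use the __stdcall convention, so calling them through pointers declared with the default __cdecl convention unbalances the stack, which is consistent with the jump to address 0x00000000 in your backtrace. Rather than spelling out __stdcall, here is a sketch using the APIENTRY macro, which the Windows headers define for exactly this purpose:
// OpenGL.h -- give every loaded entry point the APIENTRY (__stdcall) convention
extern void (APIENTRY * glGenBuffers) (GLsizei n, GLuint *buffers);
extern void (APIENTRY * glBindBuffer) (GLenum target, GLuint buffer);
extern void (APIENTRY * glBufferData) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
/* ...and apply the same APIENTRY change to the remaining declarations in
   OpenGL.h and to the matching definitions in OpenGL.c. */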
I'd like to add a menu system to my simple OpenGL program. I've used GLUT before, but that was way back in 1998, and when I mentioned it here on SO I was advised not to use it. So I want to know what menu-building libraries I can use, preferably platform-independent like GLUT, since I see GLUT is still used in many examples. My program doesn't use GLUT, but I'd like to add a menu system to learn how to make a more complete program.
#include <windows.h>
#include <gl/gl.h>
LRESULT CALLBACK WindowProc(HWND, UINT, WPARAM, LPARAM);
void EnableOpenGL(HWND hwnd, HDC*, HGLRC*);
void DisableOpenGL(HWND, HDC, HGLRC);
int WINAPI WinMain(HINSTANCE hInstance,
HINSTANCE hPrevInstance,
LPSTR lpCmdLine,
int nCmdShow)
{
WNDCLASSEX wcex;
HWND hwnd;
HDC hDC;
HGLRC hRC;
MSG msg;
BOOL bQuit = FALSE;
float theta = 0.0f;
/* register window class */
wcex.cbSize = sizeof(WNDCLASSEX);
wcex.style = CS_OWNDC;
wcex.lpfnWndProc = WindowProc;
wcex.cbClsExtra = 0;
wcex.cbWndExtra = 0;
wcex.hInstance = hInstance;
wcex.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wcex.hCursor = LoadCursor(NULL, IDC_ARROW);
wcex.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
wcex.lpszMenuName = NULL;
wcex.lpszClassName = "GLSample";
wcex.hIconSm = LoadIcon(NULL, IDI_APPLICATION);
if (!RegisterClassEx(&wcex))
return 0;
/* create main window */
hwnd = CreateWindowEx(0,
"GLSample",
"OpenGL Sample",
WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT,
CW_USEDEFAULT,
256,
256,
NULL,
NULL,
hInstance,
NULL);
ShowWindow(hwnd, nCmdShow);
/* enable OpenGL for the window */
EnableOpenGL(hwnd, &hDC, &hRC);
/* program main loop */
while (!bQuit)
{
/* check for messages */
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
{
/* handle or dispatch messages */
if (msg.message == WM_QUIT)
{
bQuit = TRUE;
}
else
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
}
else
{
/* OpenGL animation code goes here */
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
glRotatef(theta, 0.0f, 0.0f, 1.0f);
glBegin(GL_TRIANGLES);
glColor3f(1.0f, 0.0f, 0.0f); glVertex2f(0.0f, 1.0f);
glColor3f(0.0f, 1.0f, 0.0f); glVertex2f(0.87f, -0.5f);
glColor3f(0.0f, 0.0f, 1.0f); glVertex2f(-0.87f, -0.5f);
glEnd();
glPopMatrix();
SwapBuffers(hDC);
theta += 1.0f;
Sleep (1);
}
}
/* shutdown OpenGL */
DisableOpenGL(hwnd, hDC, hRC);
/* destroy the window explicitly */
DestroyWindow(hwnd);
return msg.wParam;
}
LRESULT CALLBACK WindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
switch (uMsg)
{
case WM_CLOSE:
PostQuitMessage(0);
break;
case WM_DESTROY:
return 0;
case WM_KEYDOWN:
{
switch (wParam)
{
case VK_ESCAPE:
PostQuitMessage(0);
break;
}
}
break;
default:
return DefWindowProc(hwnd, uMsg, wParam, lParam);
}
return 0;
}
void EnableOpenGL(HWND hwnd, HDC* hDC, HGLRC* hRC)
{
PIXELFORMATDESCRIPTOR pfd;
int iFormat;
/* get the device context (DC) */
*hDC = GetDC(hwnd);
/* set the pixel format for the DC */
ZeroMemory(&pfd, sizeof(pfd));
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW |
PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 24;
pfd.cDepthBits = 16;
pfd.iLayerType = PFD_MAIN_PLANE;
iFormat = ChoosePixelFormat(*hDC, &pfd);
SetPixelFormat(*hDC, iFormat, &pfd);
/* create and enable the render context (RC) */
*hRC = wglCreateContext(*hDC);
wglMakeCurrent(*hDC, *hRC);
}
void DisableOpenGL (HWND hwnd, HDC hDC, HGLRC hRC)
{
wglMakeCurrent(NULL, NULL);
wglDeleteContext(hRC);
ReleaseDC(hwnd, hDC);
}
For example, in the answer here:
OpenGL - GLUT - Displaying different pop-up menus
It says not to use GLUT, but not what to use instead, so it doesn't really tell me where to begin. Can you tell me what to use instead of GLUT?
For platform-independent OpenGL development, use a cross-platform GUI toolkit like Qt or wxWidgets.
Of these I only have personal experience with Qt's OpenGL module. It comes with many examples of how to set up an OpenGL rendering context and interact with it with the mouse and keyboard. It will allow you to pop up a menu when you right click in your scene. Qt also comes with utility classes for vector and matrix manipulation.