The code compiles but crashes when run. I think VertexShaderSource and FragmentShaderSource are causing the problem, but I can't pin it down. I have been trying to solve this for days now, so your help would be much appreciated.
If the following lines of code are removed, the program seems to run as intended:
GLuint RedShader = CreateShader();
glUseProgram(RedShader);
Here is the command line used to compile.
gcc -g ..\Source\main.c ..\Source\Window.c ..\Source\OpenGL.c ..\Source\Shader.c -lopengl32 -lgdi32 -o ..\Binary\Project.exe
Here is a snippet from the GDB console:
Program received signal SIGSEGV, Segmentation fault.
0x00000000 in ?? ()
(gdb) where
#0  0x00000000 in ?? ()
#1  0x00401991 in CreateShader (VertexShaderSource=0x26f018, FragmentShaderSource=0x26f014) at ..\Source\Shader.c:5
#2  0x00401432 in main () at ..\Source\Main.c:12
Here is the Code:
Main.c
#include "..\Header\Window.h"
#include "..\Header\Shader.h"
int main()
{
Project_CreateWindow("Project", 512, 512);
float Vertices[] = {-0.5f, -0.5f, 0.0f, 0.5f, -0.5f, 0.0f, 0.0f, 0.5f, 0.0f};
GLuint VBO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
GLuint RedShader = CreateShader();
glUseProgram(RedShader);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
for(;;)
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 3);
Project_UpdateWindow();
}
glDisableVertexAttribArray(0);
Project_DestroyWindow();
return 0;
}
Window.h
#ifndef WINDOW_HEADER
#define WINDOW_HEADER
#include <windows.h>
#include <GL\GL.h>
#include "..\Header\OpenGL.h"
void Project_CreateWindow(const char * Title, const unsigned short Width, const unsigned short Height);
void Project_DestroyWindow();
void Project_UpdateWindow();
#endif
Window.c
#include "..\Header\Window.h"
static HDC DeviceContext;
static HWND Window;
static HGLRC RenderContext;
LRESULT CALLBACK EventHandler(HWND Window, UINT Message, WPARAM WordParameter, LPARAM LongParameter);
void Project_CreateWindow(const char * Title, const unsigned short Width, const unsigned short Height)
{
WNDCLASSEX WindowClass = {sizeof(WNDCLASSEX), CS_OWNDC, EventHandler, 0, 0, GetModuleHandle(0), 0, 0, 0, 0, "WindowClass", 0};
RegisterClassEx(&WindowClass);
Window = CreateWindowEx(0, WindowClass.lpszClassName, Title, WS_OVERLAPPEDWINDOW, (GetSystemMetrics(SM_CXSCREEN) / 2) - (Width / 2), (GetSystemMetrics(SM_CYSCREEN) / 2) - (Height / 2), Width, Height, 0, 0, WindowClass.hInstance, 0);
PIXELFORMATDESCRIPTOR PixelFormat = {sizeof(PIXELFORMATDESCRIPTOR), 1, PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER, PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0};
if ((DeviceContext = GetDC(Window)) == 0)
{
//TODO: Device Context Creation Failed
}
SetPixelFormat(DeviceContext, ChoosePixelFormat(DeviceContext, &PixelFormat), &PixelFormat);
if ((RenderContext = wglCreateContext(DeviceContext)) == 0)
{
//TODO: Render Context Creation Failed
}
if ((wglMakeCurrent(DeviceContext, RenderContext)) == 0)
{
//TODO: Make Context Current Failed
}
ShowWindow(Window, SW_SHOW);
GetOpenGLFunctionPointers();
}
void Project_DestroyWindow()
{
wglMakeCurrent(0, 0);
wglDeleteContext(RenderContext);
ReleaseDC(Window, DeviceContext);
DestroyWindow(Window);
}
void Project_UpdateWindow()
{
MSG Message;
if (PeekMessage(&Message, 0, 0, 0, PM_REMOVE))
{
TranslateMessage(&Message);
DispatchMessage(&Message);
}
SwapBuffers(DeviceContext);
}
LRESULT CALLBACK EventHandler(HWND Window, UINT Message, WPARAM WordParameter, LPARAM LongParameter)
{
return DefWindowProc(Window, Message, WordParameter, LongParameter);
}
OpenGL.h
#ifndef OPENGL_HEADER
#define OPENGL_HEADER
#include <stddef.h>
#include <GL\GL.h>
#include <windows.h>
#include <Wingdi.h>
#define GL_ARRAY_BUFFER 0x8892
#define GL_STATIC_DRAW 0x88E4
#define GL_VERTEX_SHADER 0x8B31
#define GL_FRAGMENT_SHADER 0x8B30
typedef char GLchar;
typedef ptrdiff_t GLsizeiptr;
extern void (* glGenBuffers) (GLsizei n, GLuint *buffers);
extern void (* glBindBuffer) (GLenum target, GLuint buffer);
extern void (* glBufferData) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
extern void (* glEnableVertexAttribArray) (GLuint index);
extern void (* glVertexAttribPointer) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
extern void (* glDisableVertexAttribArray) (GLuint index);
extern GLuint (* glCreateProgram) (void);
extern GLuint (* glCreateShader) (GLenum type);
extern void (* glShaderSource) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
extern void (* glCompileShader) (GLuint shader);
extern void (* glAttachShader) (GLuint program, GLuint shader);
extern void (* glLinkProgram) (GLuint program);
extern void (* glUseProgram) (GLuint program);
void GetOpenGLFunctionPointers();
#endif
OpenGL.c
#include "..\Header\OpenGL.h"
#include <stdio.h>
void *GetOpenGLFunctionAddress(const char *name);
void (* glGenBuffers) (GLsizei n, GLuint *buffers);
void (* glBindBuffer) (GLenum target, GLuint buffer);
void (* glBufferData) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
void (* glEnableVertexAttribArray) (GLuint index);
void (* glVertexAttribPointer) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
void (* glDisableVertexAttribArray) (GLuint index);
GLuint (* glCreateProgram) (void);
GLuint (* glCreateShader) (GLenum type);
void (* glShaderSource) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
void (* glCompileShader) (GLuint shader);
void (* glAttachShader) (GLuint program, GLuint shader);
void (* glLinkProgram) (GLuint program);
void (* glUseProgram) (GLuint program);
void GetOpenGLFunctionPointers()
{
glGenBuffers = (void *)GetOpenGLFunctionAddress("glGenBuffers");
glBindBuffer = (void *)GetOpenGLFunctionAddress("glBindBuffer");
glBufferData = (void *)GetOpenGLFunctionAddress("glBufferData");
glEnableVertexAttribArray = (void *)GetOpenGLFunctionAddress("glEnableVertexAttribArray");
glVertexAttribPointer = (void *)GetOpenGLFunctionAddress("glVertexAttribPointer");
glDisableVertexAttribArray = (void *)GetOpenGLFunctionAddress("glDisableVertexAttribArray");
glCreateProgram = (void *)GetOpenGLFunctionAddress("glCreateProgram");
glCreateShader = (void *)GetOpenGLFunctionAddress("glCreateShader");
glShaderSource = (void *)GetOpenGLFunctionAddress("glShaderSource");
glCompileShader = (void *)GetOpenGLFunctionAddress("glCompileShader");
glAttachShader = (void *)GetOpenGLFunctionAddress("glAttachShader");
glLinkProgram = (void *)GetOpenGLFunctionAddress("glLinkProgram");
glUseProgram = (void *)GetOpenGLFunctionAddress("glUseProgram");
}
void *GetOpenGLFunctionAddress(const char *FunctionName)
{
void *FunctionPointer = (void *)wglGetProcAddress(FunctionName);
if(FunctionPointer == 0 || (FunctionPointer == (void*)0x1) || (FunctionPointer == (void*)0x2) || (FunctionPointer == (void*)0x3) || (FunctionPointer == (void*)-1))
{
HMODULE Module = LoadLibraryA("opengl32.dll");
FunctionPointer = (void *)GetProcAddress(Module, FunctionName);
if(FunctionPointer == 0 || (FunctionPointer == (void*)0x1) || (FunctionPointer == (void*)0x2) || (FunctionPointer == (void*)0x3) || (FunctionPointer == (void*)-1))
{
printf("%s %i", "Invalid OpenGL Function Pointer, Last Error:", GetLastError());
}
}
return FunctionPointer;
}
Shader.h
#ifndef SHADER_HEADER
#define SHADER_HEADER
#include "..\Header\OpenGL.h"
GLuint CreateShader();
#endif
Shader.c
#include "..\Header\Shader.h"
GLuint CreateShader()
{
const GLchar * VertexShaderSource = "#version 330 core\n layout(location = 0) in vec3 vertexPosition_modelspace;\n void main(){\n gl_Position.xyz = vertexPosition_modelspace;\n gl_Position.w = 1.0;\n }";
const GLchar * FragmentShaderSource = "#version 330 core\n out vec3 color;\n void main(){\n color = vec3(1.0f, 0.55f, 0.0f);\n }";
GLuint ShaderProgram = glCreateProgram();
GLuint VertexShader = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(VertexShader, 1, &VertexShaderSource, 0);
glShaderSource(FragmentShader, 1, &FragmentShaderSource, 0);
glCompileShader(VertexShader);
glCompileShader(FragmentShader);
glAttachShader(ShaderProgram, VertexShader);
glAttachShader(ShaderProgram, FragmentShader);
glLinkProgram(ShaderProgram);
return ShaderProgram;
}
I forgot the calling convention, e.g.
extern void (* glGenBuffers) (GLsizei n, GLuint *buffers);
Should be changed to:
extern void (__stdcall * glGenBuffers) (GLsizei n, GLuint *buffers);
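For reference, a hedged sketch of the same fix spelled with the APIENTRY macro from <windows.h> (which expands to __stdcall on Win32), applied to the first few declarations in OpenGL.h; the remaining pointers there and the definitions in OpenGL.c need the same treatment:
/* Loaded OpenGL entry points must use the system calling convention. */
extern void (APIENTRY * glGenBuffers) (GLsizei n, GLuint *buffers);
extern void (APIENTRY * glBindBuffer) (GLenum target, GLuint buffer);
extern void (APIENTRY * glBufferData) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
On 64-bit Windows the x64 calling convention ignores __stdcall, which is why this kind of mismatch typically only crashes 32-bit builds like the one in the backtrace above.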
I'm trying to apply a square texture to a trapezoid-like shape in OpenGL, but I get some distortion. I've been reading a lot on possible solutions, and the one that seems most convenient requires modifying the "q" texture coordinate. That solution does it with the glTexCoord functions; however, I'm using vertex buffers and I don't know how to change this coordinate that way. The texture lookup in GLSL takes a vec2, so I have no idea how I would pass anything but two-dimensional texture coordinates to it.
main.c
//C libs
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
//GL libs (Need to have set up to run this)
#include <glad/glad.h>
#include <GLFW/glfw3.h>
//Libs in folder
#include "shader.h"
#include "image.h"
//Window consts
#define WINDOW_HEIGHT 500
#define WINDOW_WIDTH 500
#define WINDOW_NAME "ForStackOverflow"
//Shader consts
#define VERTEX_SHADER_FILE "vertex_shader.glsl"
#define FRAGMENT_SHADER_FILE "fragment_shader.glsl"
//Vertex constants
#define POSITION_ATTRIBUTE_LOC 0
#define TEXTURE_COORD_ATTRIBUTE_LOC 1
#define POSITION_SIZE 2
#define TEXTURE_COORD_SIZE 2
#define VERTEX_SIZE (POSITION_SIZE + TEXTURE_COORD_SIZE) //Amount of floats per vertex
#define POSITION_OFFSET 0
#define TEXTURE_COORD_OFFSET (POSITION_SIZE * sizeof(float))
#define STRIDE (sizeof(float) * VERTEX_SIZE)
//Functions
static void framebuffer_size_callback(GLFWwindow*, int, int);
static unsigned int load_bmp_texture(const char* name);
int main()
{
printf("Running!\n");
//*GLFW
if (!glfwInit())
{
printf("GLFW init fail\n");
return -1;
}
//3.3 core
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//*Window object
GLFWwindow* window = glfwCreateWindow(WINDOW_WIDTH, WINDOW_HEIGHT, WINDOW_NAME, NULL, NULL);
if (window == NULL)
{
printf("GLFW window fail\n");
return -1;
}
glfwMakeContextCurrent(window);
//*GLAD
if (!gladLoadGLLoader((GLADloadproc) &glfwGetProcAddress))
{
printf("GLAD init fail");
glfwTerminate();
return -1;
}
//*Window
glViewport(0, 0, WINDOW_WIDTH, WINDOW_HEIGHT);
glfwSetFramebufferSizeCallback(window, &framebuffer_size_callback);
//*Shaders
ShaderProgram shader_program;
if (!shader_program_init(&shader_program, VERTEX_SHADER_FILE, FRAGMENT_SHADER_FILE)) {
glfwTerminate();
return -1;
}
//*Triangle rendering
//Vertices
float tri_vertices[4 * VERTEX_SIZE] = { //FORM A TRAPEZOID
//Position //Texture coordinates
-0.5f, 0.5f, 0.0f, 1.0f, //Top-left
-0.5f, -0.5f, 0.0f, 0.0f, //Bottom-left
0.5f, 0.75f, 1.0f, 1.0f, //Top-right
0.5f, -0.75f, 1.0f, 0.0f //Bottom-right
};
//Indices
unsigned int tri_indices[6] = {
2, 0, 1, //Top-right, top-left, bottom-left
2, 3, 1 //Top-right, bottom-right, bottom-left
};
//VAO
unsigned int tri_vao;
glGenVertexArrays(1, &tri_vao);
glBindVertexArray(tri_vao);
//VBO
unsigned int tri_vbo;
glGenBuffers(1, &tri_vbo);
glBindBuffer(GL_ARRAY_BUFFER, tri_vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(tri_vertices), tri_vertices, GL_STATIC_DRAW);
//EBO
unsigned int tri_ebo;
glGenBuffers(1, &tri_ebo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, tri_ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(tri_indices), tri_indices, GL_STATIC_DRAW);
//Config
//Position
glVertexAttribPointer(POSITION_ATTRIBUTE_LOC, POSITION_SIZE, GL_FLOAT, GL_FALSE, STRIDE, (void*) POSITION_OFFSET);
glEnableVertexAttribArray(POSITION_ATTRIBUTE_LOC);
//Texture coordinates
glVertexAttribPointer(TEXTURE_COORD_ATTRIBUTE_LOC, TEXTURE_COORD_SIZE, GL_FLOAT, GL_FALSE, STRIDE, (void*) TEXTURE_COORD_OFFSET);
glEnableVertexAttribArray(TEXTURE_COORD_ATTRIBUTE_LOC);
//*Textures
unsigned int brick_tex = load_bmp_texture("purple_bricks.bmp");
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, brick_tex);
//Attaching to uniform
glUseProgram(shader_program);
glUniform1i(glGetUniformLocation(shader_program, "brick"), 0);
//*Rendering setup
//Shader
glUseProgram(shader_program);
//*Main loop
while (!glfwWindowShouldClose(window))
{
//*Blittering
//Background
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
//Triangles
glBindVertexArray(tri_vao);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, tri_ebo);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void*) 0);
//*Buffer, events
glfwSwapBuffers(window);
glfwPollEvents();
}
//*End program
glDeleteVertexArrays(1, &tri_vao);
glDeleteBuffers(1, &tri_vbo);
glDeleteBuffers(1, &tri_ebo);
glfwTerminate();
return 0;
}
//Centers the OpenGL part of the window and keeps the same width and height
static void framebuffer_size_callback(GLFWwindow* window, int width, int height) {
glViewport((width - WINDOW_WIDTH) / 2, (height - WINDOW_HEIGHT) / 2, WINDOW_WIDTH, WINDOW_HEIGHT);
}
//Loads and sets up a BMP texture
static unsigned int load_bmp_texture(const char* name) {
//Loading into array
RGBImage image;
read_bmp(name, &image);
//Generating texture in GL
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
//Setting mapping
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); //X
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); //Y
//Setting filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
//Setting texture and mipmap
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.width, image.height, 0, GL_RGB, GL_UNSIGNED_BYTE, image.data);
glGenerateMipmap(GL_TEXTURE_2D);
//Freeing image array
free_RGBImage(image);
return texture;
}
image.h
//Code for loading a bmp file as an array
//Definitely not part of the problem
//24 bit bmps
//C libs
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
//Prevent struct padding
#pragma pack(1)
typedef struct RGB {
unsigned char R;
unsigned char G;
unsigned char B;
} RGB;
typedef struct RGBImage {
int width;
int height;
RGB* data;
} RGBImage;
typedef struct BMPFileHeader {
char name[2];
uint32_t size;
uint32_t garbage;
uint32_t image_offset;
} BMPFileHeader;
typedef struct BMPInfoHeader {
uint32_t header_size;
uint32_t width;
uint32_t height;
uint16_t color_planes;
uint16_t bits_per_pixel;
uint32_t compression;
uint32_t image_size;
} BMPInfoHeader;
void free_RGBImage(RGBImage image) {
free(image.data);
}
bool read_bmp(const char* file_name, RGBImage* image) {
FILE* fp = fopen(file_name, "rb");
if (fp == NULL) {
printf("Couldn't open %s\n", file_name);
return false;
}
BMPFileHeader file_header;
fread(file_header.name, sizeof(BMPFileHeader), 1, fp);
if ((file_header.name[0] != 'B') || (file_header.name[1] != 'M')) {
fclose(fp);
return false;
}
BMPInfoHeader info_header;
fread(&info_header, sizeof(BMPInfoHeader), 1, fp);
if ((info_header.header_size != 40) || (info_header.compression != 0) || (info_header.bits_per_pixel != 24)) {
fclose(fp);
return false;
}
fseek(fp, file_header.image_offset, SEEK_SET);
image->width = info_header.width;
image->height = info_header.height;
image->data = (RGB*) malloc(image->height * image->width * sizeof(RGB));
for (int i = 0; i < image->height; i++) {
fread(&image->data[i * image->width], sizeof(RGB), image->width, fp);
}
int R;
for (int i = 0; i < image->height * image->width; i++) {
R = image->data[i].R;
image->data[i].R = image->data[i].B;
image->data[i].B = R;
}
fclose(fp);
return true;
}
shader.h
//Code for compiling and linking a shader
//Definitely not part of the problem
//C libs
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
//GL libs
#include <glad/glad.h>
#include <GLFW/glfw3.h>
//Consts
#define INFO_LOG_SIZE 512
static bool _glad_is_init();
static bool _glfw_is_init();
typedef unsigned int ShaderProgram;
bool shader_program_init(ShaderProgram* shader_program, char* vertex_shader_file_name, char* fragment_shader_file_name)
{
if (!_glfw_is_init()) {
printf("Shader: glfw uninitialized\n");
return 0;
}
else if (!_glad_is_init()) {
printf("Shader: glad uninitialized\n");
return 0;
}
long int file_size;
size_t new_source_length;
FILE* fp;
//*Reading vertex shader file
//Open
fp = fopen(vertex_shader_file_name, "r");
if (fp == NULL) {
printf("Couldn't open vertex shader file\n");
return 0;
}
//Find length for buffer
fseek(fp, 0L, SEEK_END);
file_size = ftell(fp);
if (file_size == -1) {
printf("Couldn't seek end of file\n");
return 0;
}
rewind(fp);
char vertex_shader_source[(file_size + 1) * sizeof(char)];
//Read
new_source_length = fread(vertex_shader_source, sizeof(char), file_size, fp);
if (ferror(fp) != 0) {
printf("Error when reading file\n");
return 0;
}
//Add string termination
vertex_shader_source[new_source_length] = '\0';
//Close
fclose(fp);
//*Reading fragment shader
//Open
fp = fopen(fragment_shader_file_name, "r");
if (fp == NULL) {
printf("Couldn't open fragment shader file\n");
return 0;
}
//Find length for buffer
fseek(fp, 0L, SEEK_END);
file_size = ftell(fp);
if (file_size == -1) {
printf("Couldn't seek end of file\n");
return 0;
}
rewind(fp);
char fragment_shader_source[(file_size + 1) * sizeof(char)];
//Read
new_source_length = fread(fragment_shader_source, sizeof(char), file_size, fp);
if (ferror(fp) != 0) {
printf("Error reading file\n");
return 0;
}
//Add string termination
fragment_shader_source[new_source_length] = '\0';
//Close
fclose(fp);
//*Compiling
//For error checking
int success;
char infolog[INFO_LOG_SIZE];
const char* vertex_shader_code = vertex_shader_source; //vertex_shader_source is of type char foo[n], a VLA.
const char* fragment_shader_code = fragment_shader_source;
//Vertex
unsigned int vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, &vertex_shader_code, NULL);
glCompileShader(vertex_shader);
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(vertex_shader, INFO_LOG_SIZE, NULL, infolog);
printf("Vertex shader compile fail: \n%s\n", infolog);
return 0;
}
//Fragment
unsigned int fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, &fragment_shader_code, NULL);
glCompileShader(fragment_shader);
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(fragment_shader, INFO_LOG_SIZE, NULL, infolog);
printf("Fragment shader compile fail: \n%s\n", infolog);
return 0;
}
//Program
*shader_program = glCreateProgram();
glAttachShader(*shader_program, vertex_shader);
glAttachShader(*shader_program, fragment_shader);
glLinkProgram(*shader_program);
glGetProgramiv(*shader_program, GL_LINK_STATUS, &success);
if (!success) {
glGetProgramInfoLog(*shader_program, INFO_LOG_SIZE, NULL, infolog);
printf("Shader program compile fail: \n%s\n", infolog);
return 0;
}
//Deleting
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
return 1;
}
bool _glfw_is_init() {
if (!glfwInit())
{
return false;
}
return true;
}
bool _glad_is_init() {
if (!gladLoadGLLoader((GLADloadproc) &glfwGetProcAddress))
{
return false;
}
return true;
}
vertex_shader.glsl
#version 330 core
layout (location = 0) in vec2 vertex_position;
layout (location = 1) in vec2 vertex_texture_coordinate;
out vec2 texture_coordinate;
void main()
{
gl_Position = vec4(vertex_position, 1.0, 1.0);
texture_coordinate = vertex_texture_coordinate;
};
fragment_shader.glsl
#version 330 core
in vec4 color;
in vec2 texture_coordinate;
out vec4 FragColor;
uniform sampler2D brick;
void main()
{
FragColor = texture(brick, texture_coordinate);
};
Texture used
Result of program
Edit: for those reading in the future, this link helped a lot with implementing the method described in the answer below:
https://www.cs.cmu.edu/~16385/s17/Slides/10.2_2D_Alignment__DLT.pdf
Given the 2D vertex coordinates P[i] and the 2D texture coordinates T[i], you need to find the homography that maps from T[i] to P[i]. The homography H is represented by a 3x3 matrix (up to a scaling factor) and can be calculated, for example, with the direct linear transform method. Beware that this involves solving a system of eight equations with eight/nine unknowns -- so it's best to resort to an existing library, like LAPACK, for that.
The homography satisfies the relations
H * T[i] ~ P[i]
as an equivalence on the projective plane. I.e.
H * (T[i], 1) = a[i] * (P[i], 1)
for some scaling factors a[i].
The trick to get the correct texture mapping is to replace the 2d vertex coordinates with 3d homogeneous ones:
homogeneous P[i] = H * (T[i], 1)
The vertex shader will then receive a vec3 vertex position, and copy the 3rd coordinate to gl_Position.w:
layout (location = 0) in vec3 vertex_position;
...
void main() {
gl_Position = vec4(vertex_position.xy, 0, vertex_position.z);
...
}
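A minimal CPU-side sketch of the same idea (not part of the original answer; the helper name is illustrative, and H is assumed to have been computed already, e.g. with the DLT method):
/* Builds interleaved (x, y, q, s, t) vertices, where (x, y, q) = H * (s, t, 1).
   H is the row-major 3x3 homography mapping texture coords T[i] to positions P[i]. */
static void build_projective_vertices(const float H[9], const float T[][2], int count, float *out)
{
    for (int i = 0; i < count; i++) {
        float s = T[i][0], t = T[i][1];
        out[i * 5 + 0] = H[0] * s + H[1] * t + H[2]; /* x */
        out[i * 5 + 1] = H[3] * s + H[4] * t + H[5]; /* y */
        out[i * 5 + 2] = H[6] * s + H[7] * t + H[8]; /* q, ends up in gl_Position.w */
        out[i * 5 + 3] = s; /* texture s */
        out[i * 5 + 4] = t; /* texture t */
    }
}
In the question's setup the position attribute then becomes three floats wide, so changing POSITION_SIZE to 3 updates the stride and offsets automatically through the existing #defines.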
Here are the results I get with this method (corrected/uncorrected):
The following OpenGL code is deliberately made very GPU-heavy, which forces the CPU to wait while the GPU finishes its work. In particular, it does so at the glFinish() call, where the CPU waits for a measured 99.87% of the time per frame. The program runs at 10 fps on my system (Windows 10, GTX 1070) with Vsync disabled. This would all be expected, were it not for the fact that while the CPU is supposed to be waiting, it inexplicably uses 100% CPU time, causing overheating.
After testing on 6 systems with Intel GPUs, 4 with AMD GPUs, and 5 with NVIDIA GPUs, only those with NVIDIA GPUs show the problem. All I can conclude so far is that the problem is NVIDIA-specific and OpenGL-specific. DirectX apps don't show the problem; in fact, it can be reproduced in Firefox by running a GPU-maxing WebGL page with ANGLE disabled (it doesn't happen with ANGLE enabled).
I compile with the following:
C:\mingw64\bin\x86_64-w64-mingw32-gcc.exe %~dp0\main.c -o %~dp0\main.exe -static-libgcc -std=c11 -ggdb -O2 -Wall -BC:\mingw64\bin\ -LC:\mingw64\lib\ -IC:\mingw64\include\ -lgdi32 -lopengl32
Minimal code (I suggest tweaking the fragment shader so that you also hit around 10 fps; it makes the problem more apparent):
#include <windows.h>
#include <GL/gl.h>
typedef signed long long int GLsizeiptr;
typedef char GLchar;
#define GL_ARRAY_BUFFER 0x8892
#define GL_DYNAMIC_DRAW 0x88E8
#define GL_FRAGMENT_SHADER 0x8B30
#define GL_VERTEX_SHADER 0x8B31
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) {
return DefWindowProc(hwnd, msg, wParam, lParam);
}
int main() {
HDC hdc;
{
WNDCLASS wc;
memset(&wc, 0, sizeof(wc));
wc.style = CS_OWNDC;
wc.lpfnWndProc = WndProc;
wc.lpszClassName = "gldemo";
if (!RegisterClass(&wc)) return 0;
HWND hwnd = CreateWindow("gldemo", "Demo", WS_POPUP, 0, 0, 1920/2, 1080/2, 0, 0, NULL, 0);
hdc = GetDC(hwnd);
const PIXELFORMATDESCRIPTOR pfd = {0,0, PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER ,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
SetPixelFormat(hdc, ChoosePixelFormat(hdc, &pfd), &pfd);
wglMakeCurrent(hdc, wglCreateContext(hdc));
ShowWindow(hwnd, SW_SHOW);
}
void (*glGenBuffers)(GLsizei, GLuint *) = (void*)wglGetProcAddress("glGenBuffers");
void (*glBindBuffer)(GLenum, GLuint) = (void*)wglGetProcAddress("glBindBuffer");
void (*glBufferData)(GLenum, GLsizeiptr, void *, GLenum) = (void*)wglGetProcAddress("glBufferData");
GLuint (*glCreateShader)(GLuint) = (void*)wglGetProcAddress("glCreateShader");
void (*glAttachShader)(GLuint, GLuint) = (void*)wglGetProcAddress("glAttachShader");
void (*glCompileShader)(GLuint) = (void*)wglGetProcAddress("glCompileShader");
void (*glShaderSource)(GLuint, GLuint, const char **, const GLint *) = (void*)wglGetProcAddress("glShaderSource");
void (*glEnableVertexAttribArray)(GLuint) = (void*)wglGetProcAddress("glEnableVertexAttribArray");
GLuint (*glGetAttribLocation)(GLuint, GLchar *) = (void*)wglGetProcAddress("glGetAttribLocation");
void (*glVertexAttribPointer)(GLuint, GLint, GLenum, GLboolean, GLsizei, void *) = (void*)wglGetProcAddress("glVertexAttribPointer");
GLuint (*glCreateProgram)() = (void*)wglGetProcAddress("glCreateProgram");
void (*glLinkProgram)(GLuint) = (void*)wglGetProcAddress("glLinkProgram");
void (*glUseProgram)(GLuint) = (void*)wglGetProcAddress("glUseProgram");
const char *g_vertCode =
"#version 420\n"
"in vec3 vertexPosition;"
"void main() {gl_Position = vec4(vertexPosition.xyz, 1.);}";
const char *g_fragCode =
"#version 420\n"
"void main() {"
"float res = 0.5;"
"for (int t=0;t<58000;t++) {" // tweak this to make sure you're outputting ~10 fps. 58000 is ok for a gtx 1070.
"res = fract(sin(dot(gl_FragCoord.xy+res, vec2(12.9898,78.233))) * 43758.5453);"
"}"
"gl_FragColor = vec4(vec3(res)*0.4, 1.0);"
"}";
GLuint prog = glCreateProgram();
GLuint vertshader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertshader, 1, &g_vertCode, 0);
glCompileShader(vertshader);
glAttachShader(prog, vertshader);
GLuint fragshader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragshader, 1, &g_fragCode, 0);
glCompileShader(fragshader);
glAttachShader(prog, fragshader);
glLinkProgram(prog);
glUseProgram(prog);
GLuint attribVertexPosition = glGetAttribLocation(prog, "vertexPosition");
float verts[4*3] = {1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f};
GLuint vboId;
glGenBuffers(1, &vboId);
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, 4*3*4, verts, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(attribVertexPosition);
glVertexAttribPointer(attribVertexPosition, 3, GL_FLOAT, 0, 12, (void*)0);
for (;;) {
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
//__asm__("int $3");
glFinish(); // Halts here with 100% cpu on my system.
SwapBuffers(hdc);
MSG msg; char done = 0;
while (PeekMessage(&msg, 0, 0, 0, PM_REMOVE)) {
if (msg.message == WM_QUIT) done = 1;
TranslateMessage(&msg);
DispatchMessage(&msg);
}
if (done) break;
}
return 0;
}
This isn't an answer, so I won't mark it as one, but I can provide a workaround for now.
The idea is that glFlush guarantees that the GPU has started its processing, so we can, on paper, insert a wait after glFlush and before glFinish to cut down on the amount of time glFinish spends frying the CPU.
This is only viable if the resolution of the waiting function is at worst 1 ms. If you use Sleep, or any other Windows API function with a tweakable timeout, that resolution can be obtained by calling timeBeginPeriod(1) at the start of the program.
To make sure the wait doesn't overshoot, which would leave the GPU idle, the wait used in the next frame is the time just spent by the wait, plus the residual time spent in glFinish, minus 0.5 milliseconds.
The result of all this is that the GPU stays well fed at 100% usage according to the Task Manager, yet CPU utilization only starts to rise significantly once the time vanilla glFinish would have spent drops below 4 ms, and the mechanism disengages itself entirely below roughly 1.5 ms.
In an actual implementation one might consider moving the SwapBuffers call inside the timed area, to account for whatever frame-limiting systems might be in place, and perhaps using waitable timers instead of Sleep.
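As a rough sketch of the waitable-timer alternative mentioned above (not part of the measured workaround; error handling kept minimal):
/* Waits roughly 'microseconds' using a waitable timer instead of Sleep().
   Relative due times are negative and given in 100-nanosecond units; the
   achievable resolution is still governed by the system timer period. */
static void wait_microseconds(long long microseconds)
{
    LARGE_INTEGER due;
    HANDLE timer = CreateWaitableTimer(NULL, TRUE, NULL);
    if (timer == NULL) return; /* fall back to no wait on failure */
    due.QuadPart = -(microseconds * 10LL);
    if (SetWaitableTimer(timer, &due, 0, NULL, NULL, FALSE))
        WaitForSingleObject(timer, INFINITE);
    CloseHandle(timer);
}
The Sleep(waitfor/1000) call in the loop below could then be replaced with wait_microseconds(waitfor).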
Compiling with:
C:\mingw64\bin\x86_64-w64-mingw32-gcc.exe %~dp0\main.c -o %~dp0\main.exe -static-libgcc -std=c11 -ggdb -O2 -Wall -BC:\mingw64\bin\ -LC:\mingw64\lib\ -IC:\mingw64\include\ -lgdi32 -lopengl32 -lwinmm
Updated code:
#include <windows.h>
#include <stdio.h>
#include <GL/gl.h>
typedef signed long long int GLsizeiptr;
typedef char GLchar;
#define GL_ARRAY_BUFFER 0x8892
#define GL_DYNAMIC_DRAW 0x88E8
#define GL_FRAGMENT_SHADER 0x8B30
#define GL_VERTEX_SHADER 0x8B31
LARGE_INTEGER Frequency;
unsigned long long int elapsedMicroseconds(LARGE_INTEGER StartingTime) {
LARGE_INTEGER EndingTime;
QueryPerformanceCounter(&EndingTime);
return (1000000*(EndingTime.QuadPart - StartingTime.QuadPart))/Frequency.QuadPart;
}
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) {
return DefWindowProc(hwnd, msg, wParam, lParam);
}
int main() {
QueryPerformanceFrequency(&Frequency);
timeBeginPeriod(1);
HDC hdc;
{
WNDCLASS wc;
memset(&wc, 0, sizeof(wc));
wc.style = CS_OWNDC;
wc.lpfnWndProc = WndProc;
wc.lpszClassName = "gldemo";
if (!RegisterClass(&wc)) return 0;
HWND hwnd = CreateWindow("gldemo", "Demo", WS_POPUP, 0, 0, 1920/2, 1080/2, 0, 0, NULL, 0);
hdc = GetDC(hwnd);
const PIXELFORMATDESCRIPTOR pfd = {0,0, PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER ,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
SetPixelFormat(hdc, ChoosePixelFormat(hdc, &pfd), &pfd);
wglMakeCurrent(hdc, wglCreateContext(hdc));
ShowWindow(hwnd, SW_SHOW);
}
void (*glGenBuffers)(GLsizei, GLuint *) = (void*)wglGetProcAddress("glGenBuffers");
void (*glBindBuffer)(GLenum, GLuint) = (void*)wglGetProcAddress("glBindBuffer");
void (*glBufferData)(GLenum, GLsizeiptr, void *, GLenum) = (void*)wglGetProcAddress("glBufferData");
GLuint (*glCreateShader)(GLuint) = (void*)wglGetProcAddress("glCreateShader");
void (*glAttachShader)(GLuint, GLuint) = (void*)wglGetProcAddress("glAttachShader");
void (*glCompileShader)(GLuint) = (void*)wglGetProcAddress("glCompileShader");
void (*glShaderSource)(GLuint, GLuint, const char **, const GLint *) = (void*)wglGetProcAddress("glShaderSource");
void (*glEnableVertexAttribArray)(GLuint) = (void*)wglGetProcAddress("glEnableVertexAttribArray");
GLuint (*glGetAttribLocation)(GLuint, GLchar *) = (void*)wglGetProcAddress("glGetAttribLocation");
void (*glVertexAttribPointer)(GLuint, GLint, GLenum, GLboolean, GLsizei, void *) = (void*)wglGetProcAddress("glVertexAttribPointer");
GLuint (*glCreateProgram)() = (void*)wglGetProcAddress("glCreateProgram");
void (*glLinkProgram)(GLuint) = (void*)wglGetProcAddress("glLinkProgram");
void (*glUseProgram)(GLuint) = (void*)wglGetProcAddress("glUseProgram");
const char *g_vertCode =
"#version 420\n"
"in vec3 vertexPosition;"
"void main() {gl_Position = vec4(vertexPosition.xyz, 1.);}";
const char *g_fragCode =
"#version 420\n"
"void main() {"
"float res = 0.5;"
"for (int t=0;t<58000;t++) {" // tweak this to make sure you're outputting ~10 fps. 58000 is ok for a gtx 1070.
"res = fract(sin(dot(gl_FragCoord.xy+res, vec2(12.9898,78.233))) * 43758.5453);"
"}"
"gl_FragColor = vec4(vec3(res)*0.4, 1.0);"
"}";
GLuint prog = glCreateProgram();
GLuint vertshader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertshader, 1, &g_vertCode, 0);
glCompileShader(vertshader);
glAttachShader(prog, vertshader);
GLuint fragshader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragshader, 1, &g_fragCode, 0);
glCompileShader(fragshader);
glAttachShader(prog, fragshader);
glLinkProgram(prog);
glUseProgram(prog);
GLuint attribVertexPosition = glGetAttribLocation(prog, "vertexPosition");
float verts[4*3] = {1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f};
GLuint vboId;
glGenBuffers(1, &vboId);
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, 4*3*4, verts, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(attribVertexPosition);
glVertexAttribPointer(attribVertexPosition, 3, GL_FLOAT, 0, 12, (void*)0);
LARGE_INTEGER syncer;
long long int waitfor = 0;
for (;;) {
//__asm__("int $3");
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glFlush();
QueryPerformanceCounter(&syncer);
if (waitfor>0) Sleep(waitfor/1000);
glFinish();
waitfor = elapsedMicroseconds(syncer)-500;
SwapBuffers(hdc);
MSG msg; char done = FALSE;
while (PeekMessage(&msg, 0, 0, 0, PM_REMOVE)) {
if (msg.message == WM_QUIT) done = TRUE;
TranslateMessage(&msg);
DispatchMessage(&msg);
}
if (done) break;
}
return 0;
}
So I am trying to render a character in OpenGL using FreeType 2. If I replace the variable vertex_location in my code with 0, I can see some kind of pixelated shape being rendered, but it seems wrong because every time I restart the application the shape is different... so I am guessing it's just some random bytes or something.
Note: I am using GLEW, freetype2, glfw3, cglm
Anyways, here is my code:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <cglm/cglm.h>
#include <cglm/call.h>
#include <math.h>
#include <ft2build.h>
#include FT_FREETYPE_H
/**
* Capture errors from glfw.
*/
static void error_callback(int error, const char* description)
{
fprintf(stderr, "Error: %s\n", description);
}
/**
* Capture key callbacks from glfw
*/
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GLFW_TRUE);
}
typedef struct CHARACTER_STRUCT
{
GLuint texture; // ID handle of the glyph texture
vec2 size; // Size of glyph
float width;
float height;
float bearing_left;
float bearing_top;
GLuint advance; // Horizontal offset to advance to next glyph
} character_T;
character_T* get_character(char c)
{
// FreeType
FT_Library ft;
// All functions return a value different than 0 whenever an error occurred
if (FT_Init_FreeType(&ft))
perror("ERROR::FREETYPE: Could not init FreeType Library");
// Load font as face
FT_Face face;
if (FT_New_Face(ft, "/usr/share/fonts/truetype/lato/Lato-Medium.ttf", 0, &face))
perror("ERROR::FREETYPE: Failed to load font");
// Set size to load glyphs as
FT_Set_Pixel_Sizes(face, 0, 32);
// Disable byte-alignment restriction
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Load character glyph
if (FT_Load_Char(face, c, FT_LOAD_RENDER))
perror("ERROR::FREETYTPE: Failed to load Glyph");
// Generate texture
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RED,
face->glyph->bitmap.width,
face->glyph->bitmap.rows,
0,
GL_RED,
GL_UNSIGNED_BYTE,
face->glyph->bitmap.buffer
);
// Set texture options
/*glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);*/
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Now store character for later use
character_T* character = calloc(1, sizeof(struct CHARACTER_STRUCT));
character->texture = texture;
character->width = face->glyph->bitmap.width;
character->height = face->glyph->bitmap.rows;
character->bearing_left = face->glyph->bitmap_left;
character->bearing_top = face->glyph->bitmap_top;
character->advance = face->glyph->advance.x;
glBindTexture(GL_TEXTURE_2D, 0);
// Destroy FreeType once we're finished
FT_Done_Face(face);
FT_Done_FreeType(ft);
return character;
}
int main(int argc, char* argv[])
{
glfwSetErrorCallback(error_callback);
/**
* Initialize glfw to be able to use it.
*/
if (!glfwInit())
perror("Failed to initialize glfw.\n");
/**
* Setting some parameters to the window,
* using OpenGL 3.3
*/
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_FLOATING, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
/**
* Creating our window
*/
GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
if (!window)
perror("Failed to create window.\n");
glfwSetKeyCallback(window, key_callback);
/**
* Enable OpenGL as current context
*/
glfwMakeContextCurrent(window);
/**
* Initialize glew and check for errors
*/
GLenum err = glewInit();
if (GLEW_OK != err)
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
fprintf(stdout, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
unsigned int VAO;
glGenVertexArrays(1, &VAO);
unsigned int VBO;
glGenBuffers(1, &VBO);
GLuint vertex_shader, fragment_shader, program;
GLint mvp_location, vertex_location;
/**
* Vertex Shader
*/
static const char* vertex_shader_text =
"#version 330 core\n"
"uniform mat4 MVP;\n"
"attribute vec4 thevertex;\n"
"out vec2 TexCoord;\n"
"void main()\n"
"{\n"
" gl_Position = MVP * vec4(thevertex.xy, 0.0, 1.0);\n"
" TexCoord = thevertex.zw;"
"}\n";
/**
* Fragment Shader
*/
static const char* fragment_shader_text =
"#version 330 core\n"
"varying vec3 color;\n"
"in vec2 TexCoord;\n"
"uniform sampler2D ourTexture;\n"
"void main()\n"
"{\n"
" vec4 sampled = vec4(1.0, 1.0, 1.0, texture(ourTexture, TexCoord).r);\n"
" gl_FragColor = vec4(vec3(1, 1, 1), 1.0) * sampled;\n"
"}\n";
int success;
char infoLog[512];
/**
* Compile vertex shader and check for errors
*/
vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
glCompileShader(vertex_shader);
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
if(!success)
{
printf("Vertex Shader Error\n");
glGetShaderInfoLog(vertex_shader, 512, NULL, infoLog);
perror(infoLog);
}
/**
* Compile fragment shader and check for errors
*/
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
glCompileShader(fragment_shader);
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
if(!success)
{
printf("Fragment Shader Error\n");
glGetShaderInfoLog(fragment_shader, 512, NULL, infoLog);
perror(infoLog);
}
/**
* Create shader program and check for errors
*/
program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
glLinkProgram(program);
glGetProgramiv(program, GL_LINK_STATUS, &success);
if(!success)
{
glGetProgramInfoLog(program, 512, NULL, infoLog);
perror(infoLog);
}
/**
* Grab locations from shader
*/
vertex_location = glGetUniformLocation(program, "thevertex");
mvp_location = glGetUniformLocation(program, "MVP");
glBindVertexArray(VAO);
/**
* Create and bind texture
*/
character_T* character = get_character('s');
unsigned int texture = character->texture;
float scale = 1.0f;
GLfloat xpos = 0;
GLfloat ypos = 0;
GLfloat w = character->width * scale;
GLfloat h = character->height * scale;
GLfloat vertices[6][4] = {
{ xpos, ypos + h, 0.0, 0.0 },
{ xpos, ypos, 0.0, 1.0 },
{ xpos + w, ypos, 1.0, 1.0 },
{ xpos, ypos + h, 0.0, 0.0 },
{ xpos + w, ypos, 1.0, 1.0 },
{ xpos + w, ypos + h, 1.0, 0.0 }
};
/**
* Main loop
*/
while (!glfwWindowShouldClose(window))
{
int width, height;
mat4 p, mvp;
double t = glfwGetTime();
glfwGetFramebufferSize(window, &width, &height);
glViewport(0, 0, width, height);
glClear(GL_COLOR_BUFFER_BIT);
mat4 m = GLM_MAT4_IDENTITY_INIT;
glm_translate(m, (vec3){ -sin(t), -cos(t), 0 });
glm_ortho_default(width / (float) height, p);
glm_mat4_mul(p, m, mvp);
glUseProgram(program);
glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) mvp);
/**
* Draw texture
*/
glActiveTexture(GL_TEXTURE0);
glBindVertexArray(VAO);
glBindTexture(GL_TEXTURE_2D, texture);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 6 * 4, vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(vertex_location);
glVertexAttribPointer(vertex_location, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), 0);
glDrawArrays(GL_TRIANGLES, 0, 6);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
thevertex is a vertex attribute.
attribute vec4 thevertex;
Thus you have to get the resource index of the attribute with glGetAttribLocation rather than glGetUniformLocation. Change
vertex_location = glGetUniformLocation(program, "thevertex");
to:
vertex_location = glGetAttribLocation(program, "thevertex");
glGetUniformLocation is meant to retrieve the active resource index (location) of a uniform variable.
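As a small hedged addition (not part of the original answer), it is worth checking both locations after linking, since glGetAttribLocation and glGetUniformLocation return -1 when the name is not an active attribute or uniform:
vertex_location = glGetAttribLocation(program, "thevertex");
mvp_location = glGetUniformLocation(program, "MVP");
if (vertex_location == -1 || mvp_location == -1)
    fprintf(stderr, "missing attribute/uniform: thevertex=%d MVP=%d\n",
            vertex_location, mvp_location);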
Below I've included glfw-quick-test.c, which is basically copied verbatim from http://www.glfw.org/docs/latest/quick.html - except for the removed use of glad, an added background color, a couple of defines so it compiles on Ubuntu 14.04 (64-bit), and a preprocessor #ifdef switch (macro DO_OPENGL_THREE) to change the requested OpenGL version.
When I compile with:
gcc -g -o glfw-quick-test.exe glfw-quick-test.c -lglfw -lGLU -lGL -lm
./glfw-quick-test.exe
... then I get the message "GLFW Requesting OpenGL 2.1" and drawing is fine:
When I compile with:
gcc -g -DDO_OPENGL_THREE -o glfw-quick-test.exe glfw-quick-test.c -lglfw -lGLU -lGL -lm
./glfw-quick-test.exe
... then I get the message "GLFW Requesting OpenGL 3.2" and the rotating triangle is not drawn at all - only the background color is preserved:
Can anyone explain why this happens? Can I somehow get GLFW3 to draw even when OpenGL 3.2 is requested, and if so, how?
(I'm aware that the original source code says "// NOTE: OpenGL error checks have been omitted for brevity", but I'm not sure what sort of error checks I should add, to see what the problem would be with OpenGL 3.2 drawing ...)
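For the error-check question, the checks typically added besides glGetError are the shader compile and program link status queries; a minimal sketch with illustrative helper names:
/* Sketch: print the info log if a shader failed to compile or the program failed to link. */
static void check_shader(GLuint shader, const char *label) {
    GLint ok = GL_FALSE;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &ok);
    if (!ok) {
        char log[1024];
        glGetShaderInfoLog(shader, sizeof(log), NULL, log);
        fprintf(stderr, "%s compile failed:\n%s\n", label, log);
    }
}
static void check_program_link(GLuint prog) {
    GLint ok = GL_FALSE;
    glGetProgramiv(prog, GL_LINK_STATUS, &ok);
    if (!ok) {
        char log[1024];
        glGetProgramInfoLog(prog, sizeof(log), NULL, log);
        fprintf(stderr, "program link failed:\n%s\n", log);
    }
}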
The code, glfw-quick-test.c (EDIT: now with error checking):
// http://www.glfw.org/docs/latest/quick.html
#define UBUNTU14
#ifdef UBUNTU14 // assume Ubuntu 14.04
// strange; Ubuntu 14 GLFW/glfw3.h doesn't have GLFW_TRUE, GLFW_FALSE, mentions GL_TRUE GL_FALSE
#define GLFW_TRUE GL_TRUE
#define GLFW_FALSE GL_FALSE
#endif
//~ #include <glad/glad.h> // "GL/GLES/EGL/GLX/WGL Loader-Generator based on the official specs."
#include <GLFW/glfw3.h>
#include "linmath.h"
#include <stdlib.h>
#include <stdio.h>
static const struct
{
float x, y;
float r, g, b;
} vertices[3] =
{
{ -0.6f, -0.4f, 1.f, 0.f, 0.f },
{ 0.6f, -0.4f, 0.f, 1.f, 0.f },
{ 0.f, 0.6f, 0.f, 0.f, 1.f }
};
static const char* vertex_shader_text =
"uniform mat4 MVP;\n"
"attribute vec3 vCol;\n"
"attribute vec2 vPos;\n"
"varying vec3 color;\n"
"void main()\n"
"{\n"
" gl_Position = MVP * vec4(vPos, 0.0, 1.0);\n"
" color = vCol;\n"
"}\n";
static const char* fragment_shader_text =
"varying vec3 color;\n"
"void main()\n"
"{\n"
" gl_FragColor = vec4(color, 1.0);\n"
"}\n";
static void error_callback(int error, const char* description)
{
fprintf(stderr, "Error: %s\n", description);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GLFW_TRUE);
}
void checkGLerrors(char *label) {
// check OpenGL error
GLenum err;
while ((err = glGetError()) != GL_NO_ERROR) {
char* errorstr = "";
switch(err) {
case GL_INVALID_OPERATION: errorstr="INVALID_OPERATION"; break;
case GL_INVALID_ENUM: errorstr="INVALID_ENUM"; break;
case GL_INVALID_VALUE: errorstr="INVALID_VALUE"; break;
case GL_OUT_OF_MEMORY: errorstr="OUT_OF_MEMORY"; break;
case GL_INVALID_FRAMEBUFFER_OPERATION: errorstr="INVALID_FRAMEBUFFER_OPERATION"; break;
}
printf("OpenGL error ('%s'): %d %s\n", label, err, errorstr);
}
}
int main(void)
{
GLFWwindow* window;
GLuint vertex_buffer, vertex_shader, fragment_shader, program;
GLint mvp_location, vpos_location, vcol_location;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
// NB: Ubuntu will not draw if 3.2 (just black screen) - only if 2.0 or 2.1?
#ifdef DO_OPENGL_THREE
printf("GLFW Requesting OpenGL 3.2\n");
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // only 3.2+
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE); //only 3.0+
glfwWindowHint(GLFW_RESIZABLE, GL_TRUE); // https://stackoverflow.com/q/23834680/
#else
printf("GLFW Requesting OpenGL 2.1\n");
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2); // 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1); // 0);
#endif
checkGLerrors("post hint");
window = glfwCreateWindow(640, 480, "Simple example", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
checkGLerrors("post glfwCreateWindow");
glfwSetKeyCallback(window, key_callback);
glfwMakeContextCurrent(window);
//~ gladLoadGLLoader((GLADloadproc) glfwGetProcAddress);
glfwSwapInterval(1);
// NOTE: OpenGL error checks have been omitted for brevity
glGenBuffers(1, &vertex_buffer);
checkGLerrors("post glGenBuffers");
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, &vertex_shader_text, NULL);
glCompileShader(vertex_shader);
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, &fragment_shader_text, NULL);
glCompileShader(fragment_shader);
program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
glLinkProgram(program);
checkGLerrors("post glLinkProgram");
mvp_location = glGetUniformLocation(program, "MVP");
vpos_location = glGetAttribLocation(program, "vPos");
vcol_location = glGetAttribLocation(program, "vCol");
checkGLerrors("post gl locations");
glEnableVertexAttribArray(vpos_location);
checkGLerrors("post gl EnableVertexAttribArray");
glVertexAttribPointer(vpos_location, 2, GL_FLOAT, GL_FALSE,
sizeof(float) * 5, (void*) 0);
checkGLerrors("post glVertexAttribPointer");
glEnableVertexAttribArray(vcol_location);
checkGLerrors("post glEnableVertexAttribArray");
glVertexAttribPointer(vcol_location, 3, GL_FLOAT, GL_FALSE,
sizeof(float) * 5, (void*) (sizeof(float) * 2));
checkGLerrors("post glVertexAttribPointer");
while (!glfwWindowShouldClose(window))
{
float ratio;
int width, height;
mat4x4 m, p, mvp;
glfwGetFramebufferSize(window, &width, &height);
ratio = width / (float) height;
glViewport(0, 0, width, height);
glClearColor(0.784314, 0.780392, 0.305882, 1.0); // add background color
glClear(GL_COLOR_BUFFER_BIT);
mat4x4_identity(m);
mat4x4_rotate_Z(m, m, (float) glfwGetTime());
mat4x4_ortho(p, -ratio, ratio, -1.f, 1.f, 1.f, -1.f);
mat4x4_mul(mvp, p, m);
glUseProgram(program);
glUniformMatrix4fv(mvp_location, 1, GL_FALSE, (const GLfloat*) mvp);
glDrawArrays(GL_TRIANGLES, 0, 3);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Thanks to @BDL and his comment about VAOs (vertex array objects), I found "How to use VBOs without VAOs with OpenGL core profile?", and by trying out things from there, I found that the only change to the OP code above needed for drawing to show up in OpenGL 3.2 is this:
...
//~ gladLoadGLLoader((GLADloadproc) glfwGetProcAddress);
glfwSwapInterval(1);
#ifdef DO_OPENGL_THREE
// https://stackoverflow.com/a/30057424/277826:
// "You can however just create and bind a VAO and forget about it (keep it bound)."
GLuint VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
#endif
// NOTE: OpenGL error checks have been omitted for brevity
glGenBuffers(1, &vertex_buffer);
...
Once this section with VAO is in, there are no more OpenGL errors printed, and drawing is OK.
I am trying to write a basic volume renderer that uses OpenGL, with Cg for the shaders. I'm putting my transfer function in a one-dimensional texture and using it in a dependent texture lookup in the fragment shader. My problem is that I get an OpenGL error when I try to enable the parameter corresponding to that 1D texture.
My code is pretty messy; right now it's basically a mashup of code taken from "Real-Time Volume Rendering" and "The CG User's Manual".
c file:
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
#include <Cg/cg.h>
#include <Cg/cgGL.h>
static CGcontext myCgContext;
static CGprofile myCgVertexProfile,
myCgFragmentProfile;
static CGprogram myCgVertexProgram,
myCgFragmentProgram;
static const char *myProgramName = "first_volumetric_renderer",
*myVertexProgramFileName = "fvr_vertex.cg",
*myVertexProgramName = "fvr_vertex",
*myFragmentProgramFileName = "fvr_fragment.cg",
*myFragmentProgramName = "fvr_fragment";
static CGparameter first_texture, second_texture, transfer_function;
#define XDIM 256
#define YDIM 256
#define ZDIM 256
#define TRANSFER_RESOLUTION 256
static GLubyte raw_data[XDIM][YDIM][ZDIM];
static GLubyte transfer[TRANSFER_RESOLUTION][4];
static GLuint transfer_name;
static GLuint x_textures[XDIM], y_textures[YDIM], z_textures[ZDIM];
static void checkForCgError(const char *situation);
/* print any errors if we get them */
void check_gl_error(const char * where){
GLenum error = glGetError();
if(error != GL_NO_ERROR){
printf("openGL Error : %s : %s\n", where, gluErrorString(error));
exit(1);
}
}
long int file_length(FILE *f){
long int pos = ftell(f);
fseek(f, 0, SEEK_END);
long int result = ftell(f);
fseek(f, pos, SEEK_SET);
return result;
}
void get_volume_data(const char *filename){
FILE *in = fopen(filename, "r");
if(in == NULL) {
printf("opening '%s' to get volume data failed, exiting...\n", filename);
exit(1);
}
long int length = file_length(in);
if(length != XDIM*YDIM*ZDIM){
printf("the file does not contain a volume of the correct dimensions\n");
exit(1);
}
size_t res = fread((char *)raw_data, 1, length, in);
if(res < length) printf("error reading in file\n");
fclose(in);
}
void create_textures(){
glEnable(GL_TEXTURE_2D);
// reserve texture identifiers
glGenTextures(XDIM, x_textures);
glGenTextures(YDIM, y_textures);
glGenTextures(ZDIM, z_textures);
// set texture properties
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// generate slices in X
{
int x,y,z;
GLubyte x_slice[ZDIM][YDIM];
for(x=0;x < XDIM; x++){
for(y=0;y < YDIM; y++){
for(z=0;z < ZDIM; z++){
x_slice[z][y] = raw_data[x][y][z];
}
}
GLuint texname = x_textures[x];
glBindTexture(GL_TEXTURE_2D, texname);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, ZDIM, YDIM, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, x_slice);
}
}
// generate slices in Y
{
int x,y,z;
GLubyte y_slice[XDIM][ZDIM];
for(y=0;y < YDIM; y++){
for(x=0;x < XDIM; x++){
for(z=0;z < ZDIM; z++){
y_slice[x][z] = raw_data[x][y][z];
}
}
GLuint texname = y_textures[y];
glBindTexture(GL_TEXTURE_2D, texname);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, XDIM, ZDIM, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, y_slice);
}
}
// generate slices in Z
{
int x,y,z;
GLubyte z_slice[XDIM][YDIM];
for(z=0;z < ZDIM; z++){
for(y=0;y < YDIM; y++){
for(x=0;x < XDIM; x++){
z_slice[x][y] = raw_data[x][y][z];
}
}
GLuint texname = z_textures[z];
glBindTexture(GL_TEXTURE_2D, texname);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, XDIM, YDIM, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, z_slice);
}
}
}
void DrawSliceStack_NegativeZ(int numSlices){
double dZPos = -1.0;
double dZStep = 2.0/((double)numSlices);
int slice;
for(slice = 0;slice < numSlices;slice++){
double dZPosTex = (ZDIM * (dZPos + 1.0)/2.0);
int nTexIdx = (int)dZPosTex;
double dAlpha = dZPosTex - (double)nTexIdx;
check_gl_error("in slice-drawing function, before cg-stuff");
cgGLSetTextureParameter(first_texture, z_textures[nTexIdx]);
checkForCgError("setting first texture");
check_gl_error("1");
cgGLEnableTextureParameter(first_texture);
checkForCgError("enabling first texture");
check_gl_error("2");
cgGLSetTextureParameter(second_texture, z_textures[nTexIdx + 1]);
checkForCgError("setting second texture");
check_gl_error("3");
cgGLEnableTextureParameter(second_texture);
checkForCgError("enabling second texture");
check_gl_error("4");
cgGLSetTextureParameter(transfer_function, transfer_name);
checkForCgError("setting transfer function");
check_gl_error("5");
cgGLEnableTextureParameter(transfer_function);
checkForCgError("enabling transfer function");
check_gl_error("before updating parameters");
cgUpdateProgramParameters(myCgFragmentProgram);
checkForCgError("updating parameters");
check_gl_error("before drawing a slice");
glBegin(GL_QUADS);
glTexCoord3d(0.0, 0.0, dAlpha);
glVertex3d(-1.0, -1.0, dZPos);
glTexCoord3d(0.0, 1.0, dAlpha);
glVertex3d(-1.0, 1.0, dZPos);
glTexCoord3d(1.0, 1.0, dAlpha);
glVertex3d(1.0, 1.0, dZPos);
glTexCoord3d(1.0, 0.0, dAlpha);
glVertex3d(1.0, -1.0, dZPos);
glEnd();
check_gl_error("after drawing a slice");
dZPos += dZStep;
cgGLDisableTextureParameter(first_texture);
checkForCgError("disabling first texture");
cgGLDisableTextureParameter(second_texture);
checkForCgError("disabling second texture");
cgGLDisableTextureParameter(transfer_function);
checkForCgError("disabling transfer function");
}
}
void create_transfer_texture(){
glEnable(GL_TEXTURE_1D);
// create the raw data
int i;
for(i = 0; i < TRANSFER_RESOLUTION; i++){
if(i < 50) {
transfer[i][0] = (GLubyte)0;
transfer[i][1] = (GLubyte)0;
transfer[i][2] = (GLubyte)0;
transfer[i][3] = (GLubyte)0;
}
else {
transfer[i][0] = (GLubyte)255;
transfer[i][1] = (GLubyte)255;
transfer[i][2] = (GLubyte)255;
transfer[i][3] = (GLubyte)i;
}
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGenTextures(1, &transfer_name);
glBindTexture(GL_TEXTURE_3D, transfer_name);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA, TRANSFER_RESOLUTION, 0, GL_RGBA, GL_UNSIGNED_BYTE, transfer);
check_gl_error("creating transfer texture");
}
static void checkForCgError(const char *situation)
{
CGerror error;
const char *string = cgGetLastErrorString(&error);
if (error != CG_NO_ERROR) {
printf("%s: %s: %s\n",
myProgramName, situation, string);
if (error == CG_COMPILER_ERROR) {
printf("%s\n", cgGetLastListing(myCgContext));
}
exit(1);
}
}
void init_CG(){
// copy-pasted straight from one of the CG examples
myCgContext = cgCreateContext();
checkForCgError("creating context");
cgGLSetDebugMode(CG_FALSE);
cgSetParameterSettingMode(myCgContext, CG_DEFERRED_PARAMETER_SETTING);
myCgFragmentProfile = cgGLGetLatestProfile(CG_GL_FRAGMENT);
cgGLSetOptimalOptions(myCgFragmentProfile);
checkForCgError("selecting fragment profile");
myCgFragmentProgram =
cgCreateProgramFromFile(
myCgContext, /* Cg runtime context */
CG_SOURCE, /* Program in human-readable form */
myFragmentProgramFileName, /* Name of file containing program */
myCgFragmentProfile, /* Profile: OpenGL ARB vertex program */
myFragmentProgramName, /* Entry function name */
NULL); /* No extra compiler options */
checkForCgError("creating fragment program from file");
cgGLLoadProgram(myCgFragmentProgram);
checkForCgError("loading fragment program");
first_texture = cgGetNamedParameter(myCgFragmentProgram, "text0");
checkForCgError("could not get 'texture0'");
second_texture = cgGetNamedParameter(myCgFragmentProgram, "text1");
checkForCgError("could not get 'texture1'");
transfer_function = cgGetNamedParameter(myCgFragmentProgram, "transfer_function");
checkForCgError("could not get 'transfer_function'");
check_gl_error("initializing CG");
}
void reshape(int w, int h)
{
if (h == 0) h = 1;
glViewport(0, 0,w,h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-2, 2, -2, 2, -2, 2); // use orthographic projection
glMatrixMode(GL_MODELVIEW);
}
void display(){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/* cgGLBindProgram(myCgVertexProgram);
checkForCgError("binding vertex program");
cgGLEnableProfile(myCgVertexProfile);
checkForCgError("enabling vertex profile");*/
cgGLBindProgram(myCgFragmentProgram);
checkForCgError("binding fragment program");
cgGLEnableProfile(myCgFragmentProfile);
checkForCgError("enabling fragment profile");
check_gl_error("before entering slice-drawing function");
DrawSliceStack_NegativeZ(ZDIM * 2);
/*cgGLDisableProfile(myCgVertexProfile);
checkForCgError("disabling vertex profile");*/
cgGLDisableProfile(myCgFragmentProfile);
checkForCgError("disabling fragment profile");
glutSwapBuffers();
check_gl_error("Finishing 'display()'");
}
void keyboard(unsigned char c, int x, int y){
}
void init_glut(int argc, char** argv){
glutInitWindowSize(400, 400);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInit(&argc, argv);
glutCreateWindow(myProgramName);
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutReshapeFunc(reshape);
glClearColor(1.0, 0.0, 0.0, 0.0); /* Red background */
}
int main(int argc, char **argv){
init_glut(argc, argv);
init_CG();
get_volume_data("aneurism.raw");
create_textures();
create_transfer_texture();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glutMainLoop();
return 0;
}
fragment shader (fvr_fragment.cg):
float4 fvr_fragment(half3 texUV : TEXCOORD0,
uniform sampler2D text0,
uniform sampler2D text1,
uniform sampler1D transfer_function) : COLOR
{
half tex0 = tex2D(text0, texUV.xy);
half tex1 = tex2D(text1, texUV.xy);
half interp = lerp(tex0, tex1, texUV.z);
float4 result = tex1D(transfer_function, interp);
return result;
}
the volume data
When run, the program outputs:
openGL Error : before updating parameters : invalid operation
the line which causes the error, which I found thanks to my ad-hoc print debugging, is:
cgGLEnableTextureParameter(transfer_function);
Any ideas?
Your error is here
glBindTexture(GL_TEXTURE_3D, transfer_name);
It should read
glBindTexture(GL_TEXTURE_1D, transfer_name);
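Note that the glTexParameteri calls in create_transfer_texture also target GL_TEXTURE_3D, so a consistent version of that block (a sketch based on the question's code) would be:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGenTextures(1, &transfer_name);
glBindTexture(GL_TEXTURE_1D, transfer_name);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA, TRANSFER_RESOLUTION, 0, GL_RGBA, GL_UNSIGNED_BYTE, transfer);
check_gl_error("creating transfer texture");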