I have this bit of code that sets up my vertex attribute pointers for rendering:
glBindBuffer(GL_ARRAY_BUFFER, renderer->instancesBuffer);
enableFloatVertexAttribute(attributePosition, 2, sizeof(struct surfaceInstance), 0, 1);
enableFloatVertexAttribute(attributeSize, 2, sizeof(struct surfaceInstance), (void*) (2 * sizeof(float)), 1);
enableFloatVertexAttribute(attributeAngle, 1, sizeof(struct surfaceInstance), (void*) (4 * sizeof(float)), 1);
enableFloatVertexAttribute(attributeIdentifier, 1, sizeof(struct surfaceInstance), (void*) (5 * sizeof(float)), 1);
enableFloatVertexAttribute(attributeAtlasOffset, 2, sizeof(struct surfaceInstance), (void*) (6 * sizeof(float)), 1);
enableFloatVertexAttribute(attributeTextureSize, 2, sizeof(struct surfaceInstance), (void*) (8 * sizeof(float)), 1);
...
void enableFloatVertexAttribute(int32_t attribute, uint32_t size, uint32_t stride, void const *offset, uint32_t divisor) {
glEnableVertexAttribArray(attribute);
glVertexAttribPointer(attribute, size, GL_FLOAT, stride, GL_FALSE, offset);
if(divisor != 0) glVertexAttribDivisor(attribute, divisor);
}
...
layout(location = 0) in vec2 vertex;
layout(location = 1) in vec2 position;
layout(location = 2) in vec2 size;
layout(location = 3) in float angle;
layout(location = 4) in float identifierPass;
layout(location = 5) in vec2 atlasOffset;
layout(location = 6) in vec2 textureSize;
These calls set up the attributes for the instance buffer, which is filled by calling:
surfaceRendererAppendSurface(renderer, 100, 100, 48, 48, glfwTime, 1);
surfaceRendererAppendSurface(renderer, 200, 100, 48, 48, glfwTime, 1);
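(For context: given the strides and offsets passed above, struct surfaceInstance presumably packs ten floats per instance, roughly like the sketch below; the exact field names are my guess, reconstructed from the attribute names.)
struct surfaceInstance {      /* assumed layout, reconstructed from the offsets above */
    float position[2];        /* offset 0                 */
    float size[2];            /* offset 2 * sizeof(float) */
    float angle;              /* offset 4 * sizeof(float) */
    float identifier;         /* offset 5 * sizeof(float) */
    float atlasOffset[2];     /* offset 6 * sizeof(float) */
    float textureSize[2];     /* offset 8 * sizeof(float) */
};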
As one can see, the data is sent to the GPU properly: the first instance is drawn correctly and the GPU draws the right number of instances. But when I profile my application, the second, third, and fourth instances are all wrong and don't follow the supplied buffer at all, which leads me to believe it is a problem with the divisor. What is even more bizarre is that when I switch my divisor to 2, the second instance is drawn well; with 3, the third is drawn correctly. It is very weird and I have no idea what is going on...
Case in point: Instance 1 = perfectly copied!
Instance 2 = What the hell happened???
(Looking at it right now... it looks like vertexAttribDivisor had no effect!)
To me, it looks like your stride is completely off. And voila, you swapped the function parameters:
glVertexAttribPointer(attribute, size, GL_FLOAT, stride, GL_FALSE, offset);
^^^^^^^^ this is actually what you set as stride
Since GL_FALSE is 0, and GL interprets a stride of 0 as a tightly packed array, the data in the screenshot makes perfect sense.
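For reference, the actual signature is glVertexAttribPointer(index, size, type, normalized, stride, pointer), so the wrapper would presumably become:
void enableFloatVertexAttribute(int32_t attribute, uint32_t size, uint32_t stride, void const *offset, uint32_t divisor) {
    glEnableVertexAttribArray(attribute);
    /* normalized (GL_FALSE) comes before stride */
    glVertexAttribPointer(attribute, size, GL_FLOAT, GL_FALSE, stride, offset);
    if (divisor != 0) glVertexAttribDivisor(attribute, divisor);
}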
I'm pretty new to this. Here is the shader code:
#version 430 es
precision mediump float;
layout(local_size_x = 100, local_size_y = 1, local_size_z = 1) in;
layout(std430) buffer;
layout(binding = 0) buffer Input0 {
float elements[];
} input_data0;
layout(binding = 1) buffer Input1 {
float elements[];
} input_data1;
layout(binding = 2) buffer Output1 {
float elements[];
} output_data0;
void main()
{
uint index = gl_GlobalInvocationID.x;
float result = input_data0.elements[index] * input_data1.elements[index];
atomicAdd(output_data0.elements[0], result);
}
and here is how I call it:
glUseProgram(ann->sumShaderProgram);
glGenBuffers(1, &SSBO1);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, SSBO1);
glBufferData(GL_SHADER_STORAGE_BUFFER, num_connections * sizeof(GLfloat), weights, GL_STATIC_DRAW);
glGenBuffers(1, &SSBO2);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, SSBO2);
glBufferData(GL_SHADER_STORAGE_BUFFER, num_connections * sizeof(GLfloat), layer_it->values, GL_STATIC_DRAW);
glGenBuffers(1, &SSBO3);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, SSBO3);
glBufferData(GL_SHADER_STORAGE_BUFFER, 1 * sizeof(GLfloat), &neuron_sum, GL_DYNAMIC_DRAW);
glDispatchCompute(num_connections/100, 1, 1);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
output = (fann_type*)glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, 1 * sizeof(GLfloat), GL_MAP_READ_BIT);
neuron_sum = output[0];
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
glDeleteBuffers(1, &SSBO1);
glDeleteBuffers(1, &SSBO2);
glDeleteBuffers(1, &SSBO3);
glMapBufferRange returns NULL and the program segfaults. It looks like a weird function call: how is it supposed to know to use the third buffer? Is there a way to retrieve just a single output float instead of a buffer of one element, and would that be better?
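For what it's worth, glMapBufferRange operates on whatever buffer is currently bound to the GL_SHADER_STORAGE_BUFFER target; glBindBufferBase also sets that generic binding, so here it happens to be SSBO3, the last buffer bound. A sketch that makes the intent explicit before mapping:
/* Bind the result buffer explicitly so there is no ambiguity about which buffer gets mapped. */
glBindBuffer(GL_SHADER_STORAGE_BUFFER, SSBO3);
output = (fann_type*)glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, sizeof(GLfloat), GL_MAP_READ_BIT);
if (output != NULL) {
    neuron_sum = output[0];
    glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
}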
I am learning OpenGL and right now I am stuck on loading shaders. 90% of the time, this code works. The other 10% of the time, I get the following error for the vertex shader's compilation (I removed the error logging from the code below for easier readability.):
Vertex shader failed to compile with the following errors:
ERROR: 0:16: error(#132) Syntax error: "<" parse error
ERROR: error(#273) 1 compilation errors. No code generated
Shader loading code:
unsigned int LoadShader(const char *path_vert, const char *path_frag) { // Returns shader program ID.
unsigned int shader_program;
FILE *file;
char *source_vert, *source_frag;
unsigned int file_size;
// Read vertex shader.
file = fopen(path_vert, "rb");
fseek(file, 0, SEEK_END);
file_size = ftell(file);
fseek(file, 0, SEEK_SET);
source_vert = (char*)malloc(file_size + 1);
fread(source_vert, 1, file_size, file);
// Read fragment shader.
file = fopen(path_frag, "rb");
fseek(file, 0, SEEK_END);
file_size = ftell(file);
fseek(file, 0, SEEK_SET);
source_frag = (char*)malloc(file_size + 1);
fread(source_frag, 1, file_size, file);
fclose(file);
// Make sure the shader sources aren't garbage.
printf("%s\n\n %s\n", source_vert, source_frag);
// Create vertex shader.
unsigned int vert_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vert_shader, 1, &source_vert, NULL);
glCompileShader(vert_shader);
// Create fragment shader.
unsigned int frag_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(frag_shader, 1, &source_frag, NULL);
glCompileShader(frag_shader);
// Create shader program.
shader_program = glCreateProgram();
glAttachShader(shader_program, vert_shader);
glAttachShader(shader_program, frag_shader);
glLinkProgram(shader_program);
// Clean up the extra bits.
glDeleteShader(vert_shader);
glDeleteShader(frag_shader);
free(source_vert);
free(source_frag);
return shader_program;
}
Vertex shader:
#version 460 core
layout (location = 0) in vec3 a_pos;
layout (location = 1) in vec2 a_tex_coord;
out vec2 tex_coord;
void main() {
gl_Position = vec4(a_pos, 1.0f);
tex_coord = a_tex_coord;
}
Fragment shader:
#version 460 core
uniform sampler2D tex0;
in vec2 tex_coord;
out vec4 frag_color;
void main() {
frag_color = texture(tex0, tex_coord);
}
I am compiling for C99 with GCC using VS Code. Thanks for reading!
I suspect the problem is that the source_vert and source_frag buffers are not null-terminated. You allocate file_size + 1 bytes for each, but then only fill file_size bytes by reading from the file, leaving the last byte filled with garbage.
user7860670's answer solved my issue: all that was needed was a source_vert[file_size] = '\0';. I also added source_frag[file_size] = '\0'; just to be safe. Make sure to null-terminate your strings!! :)
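For reference, the read step with the fix applied looks something like this (since glShaderSource is called with a NULL lengths array, it expects null-terminated strings):
source_vert = (char*)malloc(file_size + 1);
fread(source_vert, 1, file_size, file);
source_vert[file_size] = '\0';  /* terminate so glShaderSource(..., NULL) knows where the source ends */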
I'm trying to load an MD2 model, but I can't seem to get the vertices to draw correctly. I'm not loading UVs or normals at the moment; I just want to see the model appear correctly in a single frame, then take it from there.
Here are my md2 structures (mostly taken from here):
struct v3
{
union
{
struct
{
union { float x; float r; };
union { float y; float g; };
union { float z; float b; };
};
float At[3];
};
};
struct md2_header
{
unsigned int Magic;
unsigned int Version;
unsigned int TextureWidth;
unsigned int TextureHeight;
unsigned int FrameSize;
unsigned int NumTextures;
unsigned int NumVertices;
unsigned int NumUVs;
unsigned int NumTrigs;
unsigned int NumGLCommands;
unsigned int NumFrames;
unsigned int OffsetTextures;
unsigned int OffsetUVs;
unsigned int OffsetTrigs;
unsigned int OffsetFrames;
unsigned int OffsetGLCommands;
unsigned int OffsetEnd;
};
struct md2_vertex
{
unsigned char At[3];
unsigned char NormalIndex;
};
struct md2_frame
{
float Scale[3];
float Translate[3];
char Name[16];
md2_vertex *Vertices;
};
struct md2_skin
{
char Name[64];
};
struct md2_uv
{
unsigned short u;
unsigned short v;
};
struct md2_triangle
{
unsigned short Vertices[3];
unsigned short UVs[3];
};
struct md2_model
{
md2_header Header;
md2_uv *UVs;
md2_triangle *Triangles;
md2_frame *Frames;
md2_skin *Skins;
int *GLCommands;
unsigned int Texture;
unsigned int VAO, VBO;
};
And here's my simple loading function:
void MD2LoadModel (char *FilePath, md2_model *Model)
{
FILE *File = fopen (FilePath, "rb");
if (!File)
{
fprintf (stderr, "Error: couldn't open \"%s\"!\n", FilePath);
return;
}
#define FREAD(Dest, Type, Count)\
fread(Dest, sizeof(Type), Count, File)
#define FSEEK(Offset)\
fseek(File, Offset, SEEK_SET)
#define ALLOC(Type, Count)\
(Type *)malloc(sizeof(Type) * Count)
/* Read Header */
FREAD(&Model->Header, md2_header, 1);
if ((Model->Header.Magic != 844121161) ||
(Model->Header.Version != 8))
{
fprintf (stderr, "Error: bad md2 Version or identifier\n");
fclose (File);
return;
}
/* Memory allocations */
Model->Skins = ALLOC(md2_skin, Model->Header.NumTextures);
Model->UVs = ALLOC(md2_uv, Model->Header.NumUVs);
Model->Triangles = ALLOC(md2_triangle, Model->Header.NumTrigs);
Model->Frames = ALLOC(md2_frame, Model->Header.NumFrames);
Model->GLCommands = ALLOC(int, Model->Header.NumGLCommands);
/* Read model data */
FSEEK(Model->Header.OffsetTextures);
FREAD(Model->Skins, md2_skin, Model->Header.NumTextures);
FSEEK(Model->Header.OffsetUVs);
FREAD(Model->UVs, md2_uv, Model->Header.NumUVs);
FSEEK(Model->Header.OffsetTrigs);
FREAD(Model->Triangles, md2_triangle, Model->Header.NumTrigs);
FSEEK(Model->Header.OffsetGLCommands);
FREAD(Model->GLCommands, int, Model->Header.NumGLCommands);
/* Read frames */
FSEEK(Model->Header.OffsetFrames);
for (int i = 0; i < Model->Header.NumFrames; i++)
{
/* Memory allocation for vertices of this frame */
Model->Frames[i].Vertices = (md2_vertex *)
malloc(sizeof(md2_vertex) * Model->Header.NumVertices);
/* Read frame data */
FREAD(&Model->Frames[i].Scale, v3, 1);
FREAD(&Model->Frames[i].Translate, v3, 1);
FREAD(Model->Frames[i].Name, char, 16);
FREAD(Model->Frames[i].Vertices, md2_vertex, Model->Header.NumVertices);
}
v3 *Vertices = ALLOC(v3, Model->Header.NumVertices);
md2_frame *Frame = &Model->Frames[0];
For(u32, i, Model->Header.NumVertices)
{
Vertices[i] = V3(
(Frame->Vertices[i].At[0] * Frame->Scale[0]) + Frame->Translate[0],
(Frame->Vertices[i].At[1] * Frame->Scale[1]) + Frame->Translate[1],
(Frame->Vertices[i].At[2] * Frame->Scale[2]) + Frame->Translate[2]);
}
glGenBuffers(1, &Model->VBO);
glBindBuffer(GL_ARRAY_BUFFER, Model->VBO);
glBufferData(GL_ARRAY_BUFFER, Model->Header.NumVertices * sizeof(v3), Vertices, GL_STATIC_DRAW);
glGenVertexArrays(1, &Model->VAO);
glBindVertexArray(Model->VAO);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
fclose (File);
free(Vertices);
#undef FSEEK
#undef FREAD
#undef ALLOC
}
I'm only passing the vertex data. From my understanding, Header.NumVertices is the number of vertices in each frame, so I'm taking an arbitrary frame (frame 0 in this case) and reading its uncompressed vertex data into Vertices.
Now, I read in a book that Quake had its y and z axes flipped, but accounting for that still didn't change much.
Here's how I'm drawing the model:
GLuint Shader = Data->Shaders.Md2Test;
ShaderUse(Shader);
ShaderSetM4(Shader, "view", &WorldToView);
ShaderSetM4(Shader, "projection", &ViewToProjection);
glBindVertexArray(DrFreak.VAO);
{
ModelToWorld = m4_Identity;
ShaderSetM4(Shader, "model", &ModelToWorld);
glDrawArrays(GL_TRIANGLES, 0, DrFreak.Header.NumVertices);
}
glBindVertexArray(0);
The matrices are calculated in a CameraUpdate function, which I can verify is working correctly because everything else in the scene renders properly except the MD2 model. See:
Everything in yellow is supposed to be the MD2 model.
Here are my shaders (pretty much the same shaders as for the crates and planes, except there's only one 'in' variable, the position, and no UVs):
#version 330 core
layout (location = 0) in vec3 position;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(position, 1.0f);
}
#version 330 core
out vec4 color;
void main()
{
color = vec4(1, 1, 0, 1);
}
I've been stuck here for a couple of days. I stepped into the loading code and I seem to be getting valid values, so I'm not sure what the issue is. What am I doing wrong/missing?
Any help is appreciated.
I fixed the problem by duplicating the vertices/UVs, getting them from the triangles data. I didn't have to flip the 't' UV coordinate like many tutorials do. I did switch the y and z coordinates, because they're flipped.
u32 NumVerts = Model->Header.NumTrigs * 3;
u32 NumUVs = NumVerts;
v3 *Vertices = ALLOC(v3, NumVerts);
v2 *UVs = ALLOC(v2, NumUVs);
md2_frame *Frame = &Model->Frames[0]; // render first frame for testing
For(u32, i, Model->Header.NumTrigs)
{
For(u32, j, 3)
{
u32 VertIndex = Model->Triangles[i].Vertices[j];
Vertices[i * 3 + j] = V3(
(Frame->Vertices[VertIndex].At[0] * Frame->Scale[0]) + Frame->Translate[0],
(Frame->Vertices[VertIndex].At[2] * Frame->Scale[2]) + Frame->Translate[2],
(Frame->Vertices[VertIndex].At[1] * Frame->Scale[1]) + Frame->Translate[1]);
u32 UVIndex = Model->Triangles[i].UVs[j];
UVs[i * 3 + j] = V2(
Model->UVs[UVIndex].u / (r32)Model->Header.TextureWidth,
Model->UVs[UVIndex].v / (r32)Model->Header.TextureHeight);
}
}
glGenVertexArrays(1, &Model->VAO);
glBindVertexArray(Model->VAO);
glGenBuffers(1, &Model->VBO);
glBindBuffer(GL_ARRAY_BUFFER, Model->VBO);
glBufferData(GL_ARRAY_BUFFER, NumVerts * sizeof(v3), Vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
u32 UVBuffer;
glGenBuffers(1, &UVBuffer);
glBindBuffer(GL_ARRAY_BUFFER, UVBuffer);
glBufferData(GL_ARRAY_BUFFER, NumUVs * sizeof(v2), UVs, GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
I will probably switch to indexed arrays and glDrawElements (see the sketch below), but for my testing purposes glDrawArrays is good enough. If anyone knows of a better way to do all this, feel free to leave a comment.
And there's Dr Freak chillin'
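For the position-only path from the original attempt, the indexed-array approach mentioned above could look roughly like this (a sketch reusing the ALLOC/For helpers from the question; note that indexing positions and UVs together doesn't work directly, because MD2 stores separate vertex and UV indices per triangle corner):
u32 NumIndices = Model->Header.NumTrigs * 3;
unsigned short *Indices = ALLOC(unsigned short, NumIndices);
For(u32, i, Model->Header.NumTrigs)
{
    For(u32, j, 3)
    {
        Indices[i * 3 + j] = Model->Triangles[i].Vertices[j];
    }
}
u32 EBO;
glGenBuffers(1, &EBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO); /* the element buffer binding is recorded in the bound VAO */
glBufferData(GL_ELEMENT_ARRAY_BUFFER, NumIndices * sizeof(unsigned short), Indices, GL_STATIC_DRAW);
/* ...and at draw time, instead of glDrawArrays: */
glDrawElements(GL_TRIANGLES, NumIndices, GL_UNSIGNED_SHORT, 0);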
I want to use some convenience methods to generate vertex and colour arrays for use in objects. From what I've seen on generating arrays, this is an example of what I currently use:
GLfloat * CDMeshVertexesCreateRectangle(CGFloat height, CGFloat width) {
// Requires the rendering method GL_TRIANGLE_FAN
GLfloat *squareVertexes = (GLfloat *) malloc(8 * sizeof(GLfloat));
squareVertexes[0] = -(width / 2);
squareVertexes[1] = -(height / 2);
squareVertexes[2] = -(width / 2);
squareVertexes[3] = (height / 2);
squareVertexes[4] = (width / 2);
squareVertexes[5] = (height / 2);
squareVertexes[6] = (width / 2);
squareVertexes[7] = -(height / 2);
return squareVertexes;
}
But when i use it on something such as this:
GLuint memoryPointer = 0;
GLuint colourMemoryPointer = 0;
GLfloat *vertexes = CDMeshVertexesCreateRectangle(200, 200);
GLfloat *colors = CDMeshColorsCreateGrey(1.0, 4);
// Allocate the buffer
glGenBuffers(1, &memoryPointer);
// Bind the buffer object (tell OpenGL what to use)
glBindBuffer(GL_ARRAY_BUFFER, memoryPointer);
// Allocate space for the VBO
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexes), vertexes, GL_STATIC_DRAW);
// Allocate the buffer
glGenBuffers(1, &colourMemoryPointer);
// Bind the buffer object (tell OpenGL what to use)
glBindBuffer(GL_ARRAY_BUFFER, colourMemoryPointer);
// Allocate space for the VBO
glBufferData(GL_ARRAY_BUFFER, sizeof(colors), colors, GL_STATIC_DRAW);
glEnableClientState(GL_VERTEX_ARRAY); // Activate vertex coordinates array
glEnableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, memoryPointer);
glVertexPointer(2, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, colourMemoryPointer);
glColorPointer(4, GL_FLOAT, 0, 0);
//render
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY); // Deactivate vertex coordinates array
glDisableClientState(GL_COLOR_ARRAY);
free(vertexes);
free(colors);
The rendering doesn't hold up, and random problems occur during rendering, such as flickering, colour distortion and more. When I use the same initialisation and rendering code with a normally defined array (removing the generated vertexes and their related code), no problems occur.
GLfloat Square[8] = {
-100, -100,
-100, 100,
100, 100,
100, -100
};
Does anyone know where I'm going wrong?
You have two problems in your code. First this pattern:
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexes), vertexes, GL_STATIC_DRAW);
sizeof(vertexes) evaluates to the size of the pointer variable, not the size of the buffer. C/C++ newbie mistake, we all did it. You need to keep track of the size yourself. So do it like this:
int allocate_a_buffer(CGFloat height, CGFloat width, GLfloat **buffer, size_t *buffer_size)
{
    // Requires the rendering method GL_TRIANGLE_FAN
    *buffer_size = <xxx> * sizeof(GLfloat);
    *buffer = (GLfloat *) malloc(*buffer_size);
    return *buffer != 0;
}
and
GLfloat *vertices;
size_t vertices_size;
if( !allocate_a_buffer(..., &vertices, &vertices_size) ) {
error();
return;
}
glBufferData(GL_ARRAY_BUFFER, vertices_size, vertices, GL_STATIC_DRAW);
If you're using C++ just use a std::vector passed by reference:
void initialize_buffer(..., std::vector<GLfloat> &buffer)
{
buffer.resize(...);
for(int n = ...; ...; ...) {
buffer[n] = ...;
}
}
and
std::vector<GLfloat> vertices;
initialize_buffer(..., vertices);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertices[0]), &vertices[0], GL_STATIC_DRAW);
Much less fuss.
The other problem is that this code seems to be called from the drawing function. The whole point of buffer objects is that you initialize them only once and afterwards just bind them and draw from them in the display routine. So glDrawArrays belongs in a different function than the rest of this code, namely the display routine, while the rest belongs in the data loading and scene data management code.
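A minimal sketch of that split, based on the question's code (the sizes assume 8 position floats and 16 colour floats from CDMeshColorsCreateGrey(1.0, 4), and the function names are just placeholders):
static GLuint vertexVBO = 0, colourVBO = 0;

void SetupQuad(void) {                       // call once, at load time
    GLfloat *vertexes = CDMeshVertexesCreateRectangle(200, 200);  // 8 floats (4 x vec2)
    GLfloat *colours  = CDMeshColorsCreateGrey(1.0, 4);           // assumed: 16 floats (4 x RGBA)

    glGenBuffers(1, &vertexVBO);
    glBindBuffer(GL_ARRAY_BUFFER, vertexVBO);
    glBufferData(GL_ARRAY_BUFFER, 8 * sizeof(GLfloat), vertexes, GL_STATIC_DRAW);

    glGenBuffers(1, &colourVBO);
    glBindBuffer(GL_ARRAY_BUFFER, colourVBO);
    glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(GLfloat), colours, GL_STATIC_DRAW);

    free(vertexes);                          // safe: the data now lives in the VBOs
    free(colours);
}

void DrawQuad(void) {                        // call every frame
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, vertexVBO);
    glVertexPointer(2, GL_FLOAT, 0, 0);
    glBindBuffer(GL_ARRAY_BUFFER, colourVBO);
    glColorPointer(4, GL_FLOAT, 0, 0);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
}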
I am trying to get the bitmap from the mouse cursor, but with the following code I just can't get the colors.
CURSORINFO cursorInfo = { 0 };
cursorInfo.cbSize = sizeof(cursorInfo);
if (GetCursorInfo(&cursorInfo)) {
ICONINFO ii = {0};
int p = GetIconInfo(cursorInfo.hCursor, &ii);
// get screen
HDC dc = GetDC(NULL);
HDC memDC = CreateCompatibleDC(dc);
//SelectObject(memDC, ii.hbmColor);
int counter = 0;
//
byte* bits[1000];// = new byte[w * 4];
BITMAPINFO bmi;
memset(&bmi, 0, sizeof(BITMAPINFO));
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = 16;
bmi.bmiHeader.biHeight = 16;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biCompression = BI_RGB;
bmi.bmiHeader.biSizeImage = 0;
bmi.bmiHeader.biXPelsPerMeter = 0;
bmi.bmiHeader.biYPelsPerMeter = 0;
bmi.bmiHeader.biClrUsed = 0;
bmi.bmiHeader.biClrImportant = 0;
int rv = ::GetDIBits(memDC, ii.hbmColor, 0, 1, (void**)&bits, &bmi, DIB_RGB_COLORS);
}
Start by getting the parameters of the bitmap as recorded by Windows:
BITMAP bitmap = {0};
GetObject(ii.hbmColor, sizeof(bitmap), &bitmap);
You can use the returned values to populate the bmi structure.
And about the bmi structure: BITMAPINFO does not reserve enough space for a palette. You should create your own structure for this:
struct BitmapPlusPalette
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD palette[256];
};
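Populating the header from the values GetObject returned might look roughly like this (a sketch; the field mapping is the standard BITMAP-to-BITMAPINFOHEADER correspondence):
BitmapPlusPalette bmi = {0};
bmi.bmiHeader.biSize        = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth       = bitmap.bmWidth;
bmi.bmiHeader.biHeight      = bitmap.bmHeight;
bmi.bmiHeader.biPlanes      = 1;
bmi.bmiHeader.biBitCount    = bitmap.bmBitsPixel;
bmi.bmiHeader.biCompression = BI_RGB;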
Calculating the number of bytes needed for the bitmap is a bit tricky, because each scanline has to be rounded up to a multiple of 4 bytes:
w = ((bitmap.bmWidth * bitmap.bmBitsPixel + 31) / 32) * 4;
byte* bits = new byte[w * bitmap.bmHeight];
And here's a corrected version of your final line:
int rv = ::GetDIBits(dc, ii.hbmColor, 0, bitmap.bmHeight, bits, (BITMAPINFO *)&bmi, DIB_RGB_COLORS);
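Once GetDIBits succeeds, the pixel data comes back as a bottom-up DIB, so reading a single pixel's colour would look roughly like this sketch (assuming a 32-bpp cursor image):
int x = 0, y = 0;                                      // pixel to sample
byte *row = bits + (bitmap.bmHeight - 1 - y) * w;      // w is the DWORD-aligned bytes per scanline; rows are stored bottom-up
byte blue  = row[x * 4 + 0];                           // 32-bpp BI_RGB DIBs store bytes as B, G, R, A
byte green = row[x * 4 + 1];
byte red   = row[x * 4 + 2];
byte alpha = row[x * 4 + 3];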
The problem with your code, I think, is in the way you allocated memory for the 'bits' variable and how you then used it in the GetDIBits call.
Firstly, the commented-out part byte* bits = new byte[w * 4] was better than byte* bits[1000]. When you write byte* bits[1000], the compiler allocates an array of 1000 POINTERS to byte, and none of those pointers points to anything.
Secondly, GetDIBits takes LPVOID lpvBits as its 5th parameter, i.e. a plain pointer to the buffer that will receive the pixel data.
Passing (void**)&bits, where bits is an array of pointers, is not what it expects; it would be better to pass a pointer to a properly sized block of bytes or unsigned ints (I'm not good at Windows types, so maybe something more appropriate would be better, sorry).
So, my guess is this:
unsigned bits[1000];
memset(bits, 0, sizeof(bits));
//...
int tv = GetDIBits(memDC, ii.hbmColor, 0, 1, (LPVOID)bits, /* ... */);