MD2 model vertices loading issue - C

I'm trying to load an MD2 model, but I can't seem to get the vertices to draw correctly. I'm not loading UVs or normals at the moment; I just want to see the model appear correctly in a single frame and take it from there.
Here are my MD2 structures (mostly taken from here):
struct v3
{
    union
    {
        struct
        {
            union { float x; float r; };
            union { float y; float g; };
            union { float z; float b; };
        };
        float At[3];
    };
};

struct md2_header
{
    unsigned int Magic;
    unsigned int Version;
    unsigned int TextureWidth;
    unsigned int TextureHeight;
    unsigned int FrameSize;
    unsigned int NumTextures;
    unsigned int NumVertices;
    unsigned int NumUVs;
    unsigned int NumTrigs;
    unsigned int NumGLCommands;
    unsigned int NumFrames;
    unsigned int OffsetTextures;
    unsigned int OffsetUVs;
    unsigned int OffsetTrigs;
    unsigned int OffsetFrames;
    unsigned int OffsetGLCommands;
    unsigned int OffsetEnd;
};

struct md2_vertex
{
    unsigned char At[3];
    unsigned char NormalIndex;
};

struct md2_frame
{
    float Scale[3];
    float Translate[3];
    char Name[16];
    md2_vertex *Vertices;
};

struct md2_skin
{
    char Name[64];
};

struct md2_uv
{
    unsigned short u;
    unsigned short v;
};

struct md2_triangle
{
    unsigned short Vertices[3];
    unsigned short UVs[3];
};

struct md2_model
{
    md2_header Header;
    md2_uv *UVs;
    md2_triangle *Triangles;
    md2_frame *Frames;
    md2_skin *Skins;
    int *GLCommands;
    unsigned int Texture;
    unsigned int VAO, VBO;
};
And here's my simple loading function:
void MD2LoadModel (char *FilePath, md2_model *Model)
{
    FILE *File = fopen (FilePath, "rb");
    if (!File)
    {
        fprintf (stderr, "Error: couldn't open \"%s\"!\n", FilePath);
        return;
    }

#define FREAD(Dest, Type, Count)\
    fread(Dest, sizeof(Type), Count, File)
#define FSEEK(Offset)\
    fseek(File, Offset, SEEK_SET)
#define ALLOC(Type, Count)\
    (Type *)malloc(sizeof(Type) * Count)

    /* Read Header */
    FREAD(&Model->Header, md2_header, 1);
    if ((Model->Header.Magic != 844121161) ||
        (Model->Header.Version != 8))
    {
        fprintf (stderr, "Error: bad md2 Version or identifier\n");
        fclose (File);
        return;
    }

    /* Memory allocations */
    Model->Skins = ALLOC(md2_skin, Model->Header.NumTextures);
    Model->UVs = ALLOC(md2_uv, Model->Header.NumUVs);
    Model->Triangles = ALLOC(md2_triangle, Model->Header.NumTrigs);
    Model->Frames = ALLOC(md2_frame, Model->Header.NumFrames);
    Model->GLCommands = ALLOC(int, Model->Header.NumGLCommands);

    /* Read model data */
    FSEEK(Model->Header.OffsetTextures);
    FREAD(Model->Skins, md2_skin, Model->Header.NumTextures);
    FSEEK(Model->Header.OffsetUVs);
    FREAD(Model->UVs, md2_uv, Model->Header.NumUVs);
    FSEEK(Model->Header.OffsetTrigs);
    FREAD(Model->Triangles, md2_triangle, Model->Header.NumTrigs);
    FSEEK(Model->Header.OffsetGLCommands);
    FREAD(Model->GLCommands, int, Model->Header.NumGLCommands);

    /* Read frames */
    FSEEK(Model->Header.OffsetFrames);
    for (int i = 0; i < Model->Header.NumFrames; i++)
    {
        /* Memory allocation for vertices of this frame */
        Model->Frames[i].Vertices = (md2_vertex *)
            malloc(sizeof(md2_vertex) * Model->Header.NumVertices);

        /* Read frame data */
        FREAD(&Model->Frames[i].Scale, v3, 1);
        FREAD(&Model->Frames[i].Translate, v3, 1);
        FREAD(Model->Frames[i].Name, char, 16);
        FREAD(Model->Frames[i].Vertices, md2_vertex, Model->Header.NumVertices);
    }

    v3 *Vertices = ALLOC(v3, Model->Header.NumVertices);
    md2_frame *Frame = &Model->Frames[0];
    For(u32, i, Model->Header.NumVertices)
    {
        Vertices[i] = V3(
            (Frame->Vertices[i].At[0] * Frame->Scale[0]) + Frame->Translate[0],
            (Frame->Vertices[i].At[1] * Frame->Scale[1]) + Frame->Translate[1],
            (Frame->Vertices[i].At[2] * Frame->Scale[2]) + Frame->Translate[2]);
    }

    glGenBuffers(1, &Model->VBO);
    glBindBuffer(GL_ARRAY_BUFFER, Model->VBO);
    glBufferData(GL_ARRAY_BUFFER, Model->Header.NumVertices * sizeof(v3), Vertices, GL_STATIC_DRAW);

    glGenVertexArrays(1, &Model->VAO);
    glBindVertexArray(Model->VAO);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    fclose (File);
    free(Vertices);

#undef FSEEK
#undef FREAD
#undef ALLOC
}
I'm only uploading the vertex data. From my understanding, Header.NumVertices is the number of vertices in each frame, so I'm taking an arbitrary frame (frame 0 in this case) and reading its decompressed vertex data into Vertices.
I also read in a book that Quake had its y and z axes flipped, but swapping them still didn't change much.
Here's how I'm drawing the model:
GLuint Shader = Data->Shaders.Md2Test;
ShaderUse(Shader);
ShaderSetM4(Shader, "view", &WorldToView);
ShaderSetM4(Shader, "projection", &ViewToProjection);
glBindVertexArray(DrFreak.VAO);
{
    ModelToWorld = m4_Identity;
    ShaderSetM4(Shader, "model", &ModelToWorld);
    glDrawArrays(GL_TRIANGLES, 0, DrFreak.Header.NumVertices);
}
glBindVertexArray(0);
The matrices are calculated in a CameraUpdate function, which I can verify is working correctly because everything else in the scene renders properly; only the MD2 model doesn't. See the screenshot: everything in yellow is supposed to be the MD2 model.
Here are my shaders (pretty much the same shaders as for the crates and planes, except there's only one 'in' variable, the position, and no UVs):
#version 330 core
layout (location = 0) in vec3 position;

uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main()
{
    gl_Position = projection * view * model * vec4(position, 1.0f);
}

#version 330 core
out vec4 color;

void main()
{
    color = vec4(1, 1, 0, 1);
}
I've been stuck here for a couple of days. I stepped into the loading code and I seem to be getting valid values, so I'm not sure what the issue is. What am I doing wrong or missing?
Any help is appreciated.

I fixed the problem by duplicating the vertices/UVs, pulling them from the triangles data. I didn't have to flip the 't' UV coordinate like many tutorials do. I did switch the y and z coordinates, since they're flipped.
u32 NumVerts = Model->Header.NumTrigs * 3;
u32 NumUVs = NumVerts;

v3 *Vertices = ALLOC(v3, NumVerts);
v2 *UVs = ALLOC(v2, NumUVs);

md2_frame *Frame = &Model->Frames[0]; // render first frame for testing
For(u32, i, Model->Header.NumTrigs)
{
    For(u32, j, 3)
    {
        u32 VertIndex = Model->Triangles[i].Vertices[j];
        Vertices[i * 3 + j] = V3(
            (Frame->Vertices[VertIndex].At[0] * Frame->Scale[0]) + Frame->Translate[0],
            (Frame->Vertices[VertIndex].At[2] * Frame->Scale[2]) + Frame->Translate[2],
            (Frame->Vertices[VertIndex].At[1] * Frame->Scale[1]) + Frame->Translate[1]);

        u32 UVIndex = Model->Triangles[i].UVs[j];
        UVs[i * 3 + j] = V2(
            Model->UVs[UVIndex].u / (r32)Model->Header.TextureWidth,
            Model->UVs[UVIndex].v / (r32)Model->Header.TextureHeight);
    }
}

glGenVertexArrays(1, &Model->VAO);
glBindVertexArray(Model->VAO);

glGenBuffers(1, &Model->VBO);
glBindBuffer(GL_ARRAY_BUFFER, Model->VBO);
glBufferData(GL_ARRAY_BUFFER, NumVerts * sizeof(v3), Vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);

u32 UVBuffer;
glGenBuffers(1, &UVBuffer);
glBindBuffer(GL_ARRAY_BUFFER, UVBuffer);
glBufferData(GL_ARRAY_BUFFER, NumUVs * sizeof(v2), UVs, GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);

glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
I will probably switch to indexed arrays and glDrawElements, but for my testing purposes glDrawArrays is good enough. If anyone knows of a better way to do all this, feel free to leave a comment; a sketch of the indexed route is below.
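For anyone curious, here is a minimal sketch of that indexed route under the same conventions as above (u32, the per-frame position VBO from the first listing). It assumes position-only drawing: MD2 stores separate position and UV indices, so a textured indexed version would first have to weld unique (position, UV) pairs.

/* Build an element buffer straight from the MD2 triangle list
   (positions only; UVs have their own indices in MD2). */
u32 NumIndices = Model->Header.NumTrigs * 3;
unsigned short *Indices = (unsigned short *)malloc(NumIndices * sizeof(unsigned short));
for (u32 i = 0; i < Model->Header.NumTrigs; i++)
    for (u32 j = 0; j < 3; j++)
        Indices[i * 3 + j] = Model->Triangles[i].Vertices[j];

glBindVertexArray(Model->VAO);   /* the EBO binding is recorded in the VAO */
u32 EBO;
glGenBuffers(1, &EBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, NumIndices * sizeof(unsigned short),
             Indices, GL_STATIC_DRAW);
glBindVertexArray(0);
free(Indices);

/* The draw call then becomes:
   glDrawElements(GL_TRIANGLES, NumIndices, GL_UNSIGNED_SHORT, 0); */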
And there's Dr Freak chillin'

Related

Declaring char variable breaks program

I am exploring .tga files.
I have fully working code that looks like this:
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

const int letterHeight = 34;
const int spacer = 5;

typedef struct{
    uint8_t idlength;
    uint8_t colourmaptype;
    uint8_t datatypecode;
    uint16_t colourmaporigin;
    uint16_t colourmaplength;
    uint8_t colourmapdepth;
    uint16_t x_origin;
    uint16_t y_origin;
    uint16_t width;
    uint16_t height;
    uint8_t bitsperpixel;
    uint8_t imagedescriptor;
} TGA_Header;

typedef struct
{
    uint8_t B;
    uint8_t G;
    uint8_t R;
} Pixel;

typedef struct{
    TGA_Header header;
    Pixel* pixels;
    int width;
    int height;
} Image;

void readHeader(TGA_Header* header, FILE* input_F){
    fread(&header->idlength, sizeof(header->idlength), 1, input_F);
    fread(&header->colourmaptype, sizeof(header->colourmaptype), 1, input_F);
    fread(&header->datatypecode, sizeof(header->datatypecode), 1, input_F);
    fread(&header->colourmaporigin, sizeof(header->colourmaporigin), 1, input_F);
    fread(&header->colourmaplength, sizeof(header->colourmaplength), 1, input_F);
    fread(&header->colourmapdepth, sizeof(header->colourmapdepth), 1, input_F);
    fread(&header->x_origin, sizeof(header->x_origin), 1, input_F);
    fread(&header->y_origin, sizeof(header->y_origin), 1, input_F);
    fread(&header->width, sizeof(header->width), 1, input_F);
    fread(&header->height, sizeof(header->height), 1, input_F);
    fread(&header->bitsperpixel, sizeof(header->bitsperpixel), 1, input_F);
    fread(&header->imagedescriptor, sizeof(header->imagedescriptor), 1, input_F);
}

void writeHeader(TGA_Header* header, FILE* output_F){
    fwrite(&header->idlength, sizeof(header->idlength), 1, output_F);
    fwrite(&header->colourmaptype, sizeof(header->colourmaptype), 1, output_F);
    fwrite(&header->datatypecode, sizeof(header->datatypecode), 1, output_F);
    fwrite(&header->colourmaporigin, sizeof(header->colourmaporigin), 1, output_F);
    fwrite(&header->colourmaplength, sizeof(header->colourmaplength), 1, output_F);
    fwrite(&header->colourmapdepth, sizeof(header->colourmapdepth), 1, output_F);
    fwrite(&header->x_origin, sizeof(header->x_origin), 1, output_F);
    fwrite(&header->y_origin, sizeof(header->y_origin), 1, output_F);
    fwrite(&header->width, sizeof(header->width), 1, output_F);
    fwrite(&header->height, sizeof(header->height), 1, output_F);
    fwrite(&header->bitsperpixel, sizeof(header->bitsperpixel), 1, output_F);
    fwrite(&header->imagedescriptor, sizeof(header->imagedescriptor), 1, output_F);
}

void image_load(Image* image, const char* path){
    FILE* input_F = fopen(path, "rb");
    readHeader(&image->header, input_F);
    image->width = image->header.width;
    image->height = image->header.height;
    image->pixels = (Pixel*) malloc(sizeof(Pixel) * image->header.width * image->header.height);
    fread(image->pixels, sizeof(Pixel), image->header.width * image->header.height, input_F);
    fclose(input_F);
}

void image_create(Image* image, const char* path){
    FILE* output_F = fopen(path, "wb");
    writeHeader(&image->header, output_F);
    fwrite(image->pixels, sizeof(Pixel), image->header.width * image->header.height, output_F);
    fclose(output_F);
}

void load_letters(Image (*letters)[26], const char* f){
    char path[101];
    for(int i=0; i<26; i++){
        strcpy(path, f);
        strcat(path, "/");
        char c[2] = {(char)(65+i), '\0'};
        strcat(path, c);
        strcat(path, ".tga\0");
        image_load(&(*letters)[i], &path[0]);
    }
}

void drawLetter(Image* image, Image* letter, int X, int Y){
    Y += letterHeight - letter->height;
    int letter_y = letter->height;
    int letter_x = letter->width;
    int image_x = image->width;
    for(int y=0; y<letter_y; y++){
        for(int x=0; x<letter_x; x++){
            if(letter->pixels[y*letter_x+x].R != (uint8_t)0 || letter->pixels[y*letter_x+x].G != (uint8_t)0 || letter->pixels[y*letter_x+x].B != (uint8_t)0){
                image->pixels[(y+Y)*image_x+(x+X)] = letter->pixels[y*letter_x+x];
            }
        }
    }
}

void drawString(Image* image, Image (*letters)[26], char (*text)[101], int Y){
    int dejToSzajzym = 0;
    for(int i=0; i<strlen((*text)); i++){
        dejToSzajzym += (*letters)[(int)(*text)[i] - 65].width;
    }
    dejToSzajzym = dejToSzajzym/2;
    dejToSzajzym = image->width/2 - dejToSzajzym;
    for(int i=0; i<strlen(*text); i++){
        if((*text)[i] != ' '){
            drawLetter(image, &(*letters)[(int)(*text)[i] - 65], dejToSzajzym, Y);
            dejToSzajzym += (*letters)[(int)(*text)[i] - 65].width;
        }else{
            dejToSzajzym += 10;
        }
    }
}

int main(int argc, char* argv[]){
    Image* image;
    Image letters[26];
    image_load(image, "img1.tga");
    load_letters(&letters, "font");
    /*
    char buffer[100];
    */
    drawString(image, &letters, "LOL", 5);
    image_create(image, "image.tga");
    free(image->pixels);
    image->pixels = NULL;
    for(int i=0; i<26; i++){
        free(letters[i].pixels);
        letters[i].pixels = NULL;
    }
    return 0;
}
But when I add the declaration of buffer as shown (it could be anywhere in main), the program immediately breaks. It doesn't even need to do anything.
error:
Unable to open 'memmove-vec-unaligned-erms.S': Unable to read file '/build/glibc-YYA7BZ/glibc-2.31/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S'
(Error: Unable to resolve non-existing file '/build/glibc-YYA7BZ/glibc-2.31/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S').
BTW: Isn't there any easier way to copy the header data?
As noted by Retired Ninja in their comment, your primary problem is that Image *image; doesn't initialize image to point anywhere in particular. You pass the uninitialized pointer to image_load(), which then scribbles on memory — and you've no idea where. This is all undefined behaviour. Adding the variable buffer moves something around and changes the behaviour, but it is still undefined — anything goes and any (mis-)behaviour is valid, especially crashes. You must fix that! One way would be to change the definition to Image image; and pass &image to image_load() and the other functions that expect an Image *.
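A minimal sketch of that fix in main():

int main(int argc, char* argv[]){
    Image image;                        /* a real object, not an uninitialized pointer */
    Image letters[26];
    image_load(&image, "img1.tga");     /* was image_load(image, ...) on a wild pointer */
    load_letters(&letters, "font");
    /* ... drawString() and image_create() take &image the same way ... */
    image_create(&image, "image.tga");
    free(image.pixels);
    image.pixels = NULL;
    return 0;
}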
BTW: Isn't there any easier way to copy the header data?
Yes, there is, and there are a couple of ways to do it. The fundamental observation is that you could write the header with fwrite(header, sizeof(*header), 1, fp), and read it with fread(header, sizeof(*header), 1, fp).
However, with the data structure as currently defined, there is some padding in the structure — one byte after datatypecode and another after colourmapdepth. If you moved colourmapdepth after datatypecode (or anywhere near the start of the structure before the first uint16_t member), you'd save two bytes in memory and have no padding bytes on disk. OTOH, there's not a lot of harm in the padding bytes being read/written. It isn't clear to me whether you're dealing with an externally imposed header structure or whether you're free to modify it.
The best way to avoid padding in a structure is to put the most stringently aligned types at the start of the structure (uint16_t is more stringently aligned than uint8_t) and less stringently aligned types at the end. That normally avoids holes in the structure. There can still be padding at the end of the structure even so.
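For illustration, a minimal sketch of that reordering, assuming you are free to change the on-disk layout; a genuine .tga header has a fixed, externally imposed byte layout that this no longer matches, so for real TGA files the field-by-field functions above remain the safe option:

typedef struct{
    uint8_t idlength;
    uint8_t colourmaptype;
    uint8_t datatypecode;
    uint8_t colourmapdepth;      /* moved up: all uint8_t first, no internal padding */
    uint16_t colourmaporigin;
    uint16_t colourmaplength;
    uint16_t x_origin;
    uint16_t y_origin;
    uint16_t width;
    uint16_t height;
    uint8_t bitsperpixel;
    uint8_t imagedescriptor;
} TGA_Header;

void readHeader(TGA_Header* header, FILE* input_F){
    fread(header, sizeof(*header), 1, input_F);
}

void writeHeader(TGA_Header* header, FILE* output_F){
    fwrite(header, sizeof(*header), 1, output_F);
}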
The compiler is attempting to call memmove and can't find it because you're running with no libraries.
Assuming you're doing what I think you're doing, the best way is to provide it. In another file, define memmove like so:
#include <stddef.h>   /* for size_t */

void *memmove(void *sm, const void *tm, size_t n)
{
    char *s = (char *)sm;
    const char *t = (const char *)tm;
    if (s > t) {
        s += n;
        t += n;
        while (n--)
            *--s = *--t;
    } else {
        while (n--)
            *s++ = *t++;
    }
    return sm;
}
You may have to fiddle with the declaration to get the compiler to accept it.
Note that this is not the best possible memmove, it's the simplest. If it's too slow, write or obtain a faster one.

OpenGL compute shader to sum over multiplications

I'm pretty new to this. Here is the shader code:
#version 430 es
precision mediump float;

layout(local_size_x = 100, local_size_y = 1, local_size_z = 1) in;
layout(std430) buffer;

layout(binding = 0) buffer Input0 {
    float elements[];
} input_data0;

layout(binding = 1) buffer Input1 {
    float elements[];
} input_data1;

layout(binding = 2) buffer Output1 {
    float elements[];
} output_data0;

void main()
{
    uint index = gl_GlobalInvocationID.x;
    float result = input_data0.elements[index] * input_data1.elements[index];
    atomicAdd(output_data0.elements[0], result);
}
and here is how I call it:
glUseProgram(ann->sumShaderProgram);
glGenBuffers(1, &SSBO1);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, SSBO1);
glBufferData(GL_SHADER_STORAGE_BUFFER, num_connections * sizeof(GLfloat), weights, GL_STATIC_DRAW);
glGenBuffers(1, &SSBO2);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, SSBO2);
glBufferData(GL_SHADER_STORAGE_BUFFER, num_connections * sizeof(GLfloat), layer_it->values, GL_STATIC_DRAW);
glGenBuffers(1, &SSBO3);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, SSBO3);
glBufferData(GL_SHADER_STORAGE_BUFFER, 1 * sizeof(GLfloat), &neuron_sum, GL_DYNAMIC_DRAW);
glDispatchCompute(num_connections/100, 1, 1);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
output = (fann_type*)glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, 1 * sizeof(GLfloat), GL_MAP_READ_BIT);
neuron_sum = output[0];
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
glDeleteBuffers(1, &SSBO1);
glDeleteBuffers(1, &SSBO2);
glDeleteBuffers(1, &SSBO3);
glMapBufferRange returns NULL and the program segfaults. It looks like a weird function call: how is it supposed to know to use the third buffer? Is there a way to retrieve just a single output float instead of a buffer of one element, and would that be better?

I'm having problems with OpenGL and I cannot find where the errors are...?

I'm trying to learn something about computer graphics, and I'm using OpenGL with C (I know, I like pain, haha).
Right now I'm following the tutorials from learnopengl.com, but although everything seems fine to me, nothing gets drawn to the screen... when I turn wireframe mode on, just a single line gets drawn.
Here's the code so far (I'm sorry if it's too long or messy):
main.cpp
#define m_DEBUG_MODE
#include <Engine/Engine.h>

int main()
{
    /*===============================================*/
    GLFWwindow* window;
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 6);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    if (!glfwInit()) {
        printf("Could not initialize GLFW...");
        return -1;
    }
    window = glfwCreateWindow(640, 480, "Hello World", NULL, NULL);
    if (!window) {
        printf("Could not create the window...");
        glfwTerminate();
        return -1;
    }
    glfwSwapInterval(1);
    glfwMakeContextCurrent(window);
    GLenum err = glewInit();
    if (GLEW_OK != err) {
        printf("Could not initialize GLEW...");
        return -1;
    }
    printf("%s\n", glGetString(GL_VERSION));

    list_t scene;
    list_new(scene, mesh_t);
    /*===============================================*/
    float vertices[] = {
        /*  position        |  rgba  */
        -0.5f, -0.5f, 1.0f, 1.0f,   1.0f, 0.0f, 0.5f, 1.0f, // 0
         0.5f, -0.5f, 1.0f, 1.0f,   0.5f, 1.0f, 0.0f, 1.0f, // 1
         0.5f,  0.5f, 1.0f, 1.0f,   0.5f, 0.0f, 1.0f, 1.0f, // 2
        -0.5f,  0.5f, 1.0f, 1.0f,   0.0f, 0.0f, 1.0f, 1.0f, // 3
    };
    float indices[6] = {
        0, 1, 2, // tri. 1
        2, 3, 0, // tri. 2
    };
    buffer_data_t vb = { .size=sizeof(vertices), .data=vertices, .usage=GL_STATIC_DRAW };
    buffer_data_t eb = { .size=sizeof(indices), .data=indices, .usage=GL_STATIC_DRAW };
    attrib_layout_t ap[2] = {
        { .index=0, .size=4, .type=GL_FLOAT, .normalized=GL_FALSE, .stride=sizeof(float) * 8, .pointer=(void*)(0 * sizeof(float)) }, // position
        { .index=1, .size=4, .type=GL_FLOAT, .normalized=GL_FALSE, .stride=sizeof(float) * 8, .pointer=(void*)(4 * sizeof(float)) }, // color
    };
    objectID m_shader = create_shader("./res/shaders/vertex.shader", "./res/shaders/fragment.shader");

    mesh_t mesh1;
    DEBUG_GL(mesh_init(
        &mesh1, 4,
        vb, ap, sizeof(ap) / sizeof(attrib_layout_t), eb,
        m_shader
    ));
    list_add(scene, mesh_t, mesh1);
    unbind_all();
    /*===============================================*/
    while (!glfwWindowShouldClose(window))
    {
        // RENDERING
        glClearColor(0.2f, 0.2f, 0.2f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT);
        for (unsigned int i=0; i<scene.size; i++) {
            DEBUG_GL(render(list_get(scene, mesh_t, i), GL_TRIANGLES, 0));
        }
        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    /*===============================================*/
    list_delete(scene, mesh_t);
    return 0;
}
Engine.h
#ifndef M_TOOLS
#define M_TOOLS

#define GLEW_STATIC
#include <GL/glew.h>
#define GLFW_DLL
#include <GLFW/glfw3.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// remove "unused" warnings
#define USE( x ) (void)x

/*============
     List
============*/
typedef struct list_t {
    void * data;        // typeless data pointer
    unsigned int size;  // count of elements
}list_t;

#define list_new( _list, T )\
{\
    T* temp_data = (T*)malloc( sizeof(T) * 0 );\
    _list.data = (void*)temp_data;\
    _list.size = 0;\
}
#define list_add( _list, T, val )\
{ /* make it a scoped block ( so it deletes temp_data ) */\
    /* create a temp array to store old data */\
    T* temp_data = (T*)malloc( sizeof(T) * _list.size );\
    if (_list.data != NULL) {\
        for (unsigned int i=0; i<_list.size; i++) {\
            temp_data[i] = ((T*)_list.data)[i];\
        }\
    }\
    /* clear the old data, create a new array with the right size,
       and put the old values (+ the new one) inside of it */\
    free( (T*)_list.data );\
    _list.size += 1;\
    _list.data = (void*)malloc( sizeof(T) * _list.size );\
    for (unsigned int i=0; i<_list.size - 1; i++) {\
        ((T*)_list.data)[i] = temp_data[i];\
    }\
    ((T*)_list.data)[_list.size - 1] = val;\
    free( temp_data );\
}

#define list_get( _list, T, index )\
    (index < _list.size) ? &((T*)_list.data)[index] : NULL

#define list_remove( _list, T, index )\
{\
    T* temp_data = (T*)malloc( sizeof(T) * _list.size );\
    for ( unsigned int i=0; i<_list.size; i++ ) {\
        temp_data[i] = ((T*)_list.data)[i];\
    }\
    _list.size -= 1;\
    for (unsigned int i=0; i<_list.size; i++) {\
        if (i != index) {\
            *((T*)_list.data + i) = temp_data[i];\
        } else {\
            continue;\
        }\
    }\
}

#define list_delete( _list, T )\
    free( (T*)(_list.data) );\
    _list.data = NULL;\
    _list.size = 0;
/*==============
      misc
==============*/
typedef unsigned int objectID;

// unbind stuff
void unbind_all() {
    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glUseProgram(0);
}

void ClearGLErrors() {
    while (glGetError() != GL_NO_ERROR);
}

int LogGLErrors(const char* file, const int line, const char* function) {
    int stop = 0;
    GLenum error = glGetError();
    while (error) {
        printf("[OpenGL Error: %d] %s : %d\n%s\n", error, file, line, function);
        stop = 1;
        error = glGetError();
    }
    if (stop)
        return 0;
    else
        return 1;
}

#define ASSERT(x) if (!(x)) exit(-1)

#ifdef m_DEBUG_MODE
#define DEBUG_GL(x) ClearGLErrors();\
    x;\
    ASSERT(LogGLErrors(__FILE__, __LINE__, #x))
#endif
#ifndef m_DEBUG_MODE
#define DEBUG_GL(x) x;
#endif
// append a char to a string
char* append_to_str(char* str, char c) {
    size_t len = strlen(str);
    /* one for extra char, one for trailing zero */
    char *str2 = malloc(len + 1 + 1);
    strcpy(str2, str);
    str2[len] = c;
    str2[len + 1] = '\0';
    // free temp str and return res
    free(str2);
    return str;
}

// return a copy of the string
char* str_copy(char* str) {
    char* new_str = "";
    for (char* c=str; *c!='\0'; c++) {
        append_to_str(new_str, *c);
    }
    return new_str;
}

// read a file and dump its content in a string
char* read_file(char* filepath) {
    FILE* file;
    char* buffer;
    long numbytes;
    // open file and search for EOF
    file = fopen(filepath, "r");
    fseek(file, 0L, SEEK_END);
    numbytes = ftell(file);
    fseek(file, 0L, SEEK_SET);
    buffer = (char*)calloc(numbytes, sizeof(char));
    // dump data into the buffer string
    fread(buffer, sizeof(char), numbytes, file);
    fclose(file);
    return buffer;
    //free(buffer); <-- idk
}
/*===============
     Shaders
===============*/
// errors logging process
#define shader_error(id, status_type, iv, info_log, delete, index)\
    int log_info##index;\
    iv(id, status_type, &log_info##index);\
    if (log_info##index == GL_FALSE) {\
        int len;\
        iv(id, GL_INFO_LOG_LENGTH, &len);\
        char* error_message = (char*)malloc(sizeof(char*) * len);\
        info_log(id, len, &len, error_message);\
        printf("%s\n", error_message);\
        delete(id);\
        return 0;\
    }

objectID compile_shader(char* source_filepath, GLenum type) {
    // parse shader source file
    char* source_string = read_file(source_filepath);
    // create shader object
    objectID shader_id = glCreateShader(type);
    glShaderSource(shader_id, 1, (const GLchar * const*)(&source_string), NULL);
    glCompileShader(shader_id);
    // check and log errors during compilation
    shader_error(shader_id, GL_COMPILE_STATUS, glGetShaderiv, glGetShaderInfoLog, glDeleteShader, 0);
    return shader_id;
}

objectID create_shader(char* vertex_filepath, char* fragment_filepath) {
    // create the program, and attach compiled shaders
    objectID program = glCreateProgram();
    objectID vs = compile_shader(vertex_filepath, GL_VERTEX_SHADER);
    objectID fs = compile_shader(fragment_filepath, GL_FRAGMENT_SHADER);
    glAttachShader(program, vs);
    glAttachShader(program, fs);
    glLinkProgram(program);
    glValidateProgram(program);
    // check and log errors during program creation
    shader_error(program, GL_ATTACHED_SHADERS, glGetProgramiv, glGetProgramInfoLog, glDeleteProgram, 0);
    shader_error(program, GL_LINK_STATUS, glGetProgramiv, glGetProgramInfoLog, glDeleteProgram, 1);
    shader_error(program, GL_VALIDATE_STATUS, glGetProgramiv, glGetProgramInfoLog, glDeleteProgram, 2);
    glDeleteShader(vs);
    glDeleteShader(fs);
    return program;
}
/*===============
   Mesh object
===============*/
typedef struct buffer_data_t {
    GLsizeiptr size;
    const GLvoid * data;
    GLenum usage;
}buffer_data_t;

typedef struct attrib_layout_t {
    GLuint index;
    GLint size;
    GLenum type;
    GLboolean normalized;
    GLsizei stride;
    const GLvoid * pointer;
}attrib_layout_t;

typedef struct mesh_t {          // actual mesh object
    objectID vao;                // vertex array object
    objectID vbo;                // vertex buffer object
    objectID ebo;                // element buffer object
    objectID shader;             // shader program
    unsigned int vert_count;     // vertices count
}mesh_t;

void mesh_bind(mesh_t* mesh) {
    unbind_all();
    // bind mesh components
    DEBUG_GL(glBindVertexArray(mesh->vao));
    DEBUG_GL(glBindBuffer(GL_ARRAY_BUFFER, mesh->vbo));
    DEBUG_GL(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->ebo));
    DEBUG_GL(glUseProgram(mesh->shader));
}

void mesh_init(
    mesh_t* mesh, unsigned int _vert_count,  // mesh object
    buffer_data_t vb,                        // vertex buffer
    attrib_layout_t* ap, unsigned int ap_n,  // attribute pointers
    buffer_data_t eb,                        // element buffer
    objectID shader_program                  // shader
) {
    unbind_all();
    mesh->vert_count = _vert_count;
    mesh->shader = shader_program;
    // vertex array
    DEBUG_GL(glGenVertexArrays(1, &mesh->vao));
    DEBUG_GL(glBindVertexArray(mesh->vao));
    // vertex buffer object
    DEBUG_GL(glGenBuffers(1, &mesh->vbo));
    DEBUG_GL(glBindBuffer(GL_ARRAY_BUFFER, mesh->vbo));
    DEBUG_GL(glBufferData(GL_ARRAY_BUFFER, vb.size, vb.data, vb.usage));
    // element buffer object
    DEBUG_GL(glGenBuffers(1, &mesh->ebo));
    DEBUG_GL(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->ebo));
    DEBUG_GL(glBufferData(GL_ELEMENT_ARRAY_BUFFER, eb.size, eb.data, eb.usage));
    // attribute pointers
    for (unsigned int i=0; i<ap_n; i++) {
        DEBUG_GL(glVertexAttribPointer(ap[i].index, ap[i].size, ap[i].type, ap[i].normalized, ap[i].stride, ap[i].pointer));
        DEBUG_GL(glEnableVertexAttribArray(ap[i].index));
    }
    unbind_all();
}
/*===============
    Renderer
===============*/
void render(mesh_t* mesh, GLenum draw_mode, int wireframe) {
    // wireframe mode (polygon fill - polygon line)
    if (wireframe) { glPolygonMode( GL_FRONT_AND_BACK, GL_LINE ); }
    else           { glPolygonMode( GL_FRONT_AND_BACK, GL_FILL ); }
    // bind - draw - unbind
    mesh_bind(mesh);
    glDrawElements( draw_mode, mesh->vert_count, GL_UNSIGNED_INT, NULL );
    unbind_all();
}

#endif /* M_TOOLS */
Tell me if I'm doing anything wrong; I just can't find anything, really (although I think it has something to do with the shaders)...
The type of the indices needs to be integral and correspond to the type specified in the draw call:
// float indices[6] = {      <-- wrong: not an integral type
unsigned int indices[6] = {
    0, 1, 2, // tri. 1
    2, 3, 0, // tri. 2
};
The type specifier GL_UNSIGNED_INT is used in the glDrawElements instruction. Therefore the type of the indices must be unsigned int.

Generating Vertex Arrays for OpenGL

I want to use some convenience methods to generate vertex and colour arrays for use in objects. From what I've seen on generating arrays, this is an example of what I currently use:
GLfloat * CDMeshVertexesCreateRectangle(CGFloat height, CGFloat width) {
    // Requires the rendering method GL_TRIANGLE_FAN
    GLfloat *squareVertexes = (GLfloat *) malloc(8 * sizeof(GLfloat));
    squareVertexes[0] = -(width / 2);
    squareVertexes[1] = -(height / 2);
    squareVertexes[2] = -(width / 2);
    squareVertexes[3] = (height / 2);
    squareVertexes[4] = (width / 2);
    squareVertexes[5] = (height / 2);
    squareVertexes[6] = (width / 2);
    squareVertexes[7] = -(height / 2);
    return squareVertexes;
}
But when I use it on something such as this:
GLuint memoryPointer = 0;
GLuint colourMemoryPointer = 0;
GLfloat *vertexes = CDMeshVertexesCreateRectangle(200, 200);
GLfloat *colors = CDMeshColorsCreateGrey(1.0, 4);
// Allocate the buffer
glGenBuffers(1, &memoryPointer);
// Bind the buffer object (tell OpenGL what to use)
glBindBuffer(GL_ARRAY_BUFFER, memoryPointer);
// Allocate space for the VBO
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexes), vertexes, GL_STATIC_DRAW);
// Allocate the buffer
glGenBuffers(1, &colourMemoryPointer);
// Bind the buffer object (tell OpenGL what to use)
glBindBuffer(GL_ARRAY_BUFFER, colourMemoryPointer);
// Allocate space for the VBO
glBufferData(GL_ARRAY_BUFFER, sizeof(colors), colors, GL_STATIC_DRAW);
glEnableClientState(GL_VERTEX_ARRAY); // Activate vertex coordinates array
glEnableClientState(GL_COLOR_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, memoryPointer);
glVertexPointer(2, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, colourMemoryPointer);
glColorPointer(4, GL_FLOAT, 0, 0);
//render
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY); // Deactivate vertex coordinates array
glDisableClientState(GL_COLOR_ARRAY);
free(vertexes);
free(colors);
The rendering doesn't hold up, and random problems occur during rendering, such as flickering, colour distortion and more. When I use the same initialisation and rendering code with a normally defined array (removing the generated vertexes and the related code), no problems occur.
GLfloat Square[8] = {
    -100, -100,
    -100,  100,
     100,  100,
     100, -100
};
Does anyone know where i'm going wrong?
You have two problems in your code. First this pattern:
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexes), vertexes, GL_STATIC_DRAW);
sizeof(vertexes) evaluates to the size of the pointer variable, not the size of the buffer. C/C++ newbie mistake, we all did it. You need to keep track of the size yourself. So do it like this:
int allocate_a_buffer(CGFloat height, CGFloat width, GLfloat **buffer, size_t *buffer_size)
{
    // Requires the rendering method GL_TRIANGLE_FAN
    return ( *buffer = (GLfloat *) malloc( *buffer_size = ( <xxx> * sizeof(GLfloat)) ) ) != 0;
}
and
GLfloat *vertices;
size_t vertices_size;
if( !allocate_a_buffer(..., &vertices, &vertices_size) ) {
    error();
    return;
}
glBufferData(GL_ARRAY_BUFFER, vertices_size, vertices, GL_STATIC_DRAW);
If you're using C++ just use a std::vector passed by reference:
void initialize_buffer(..., std::vector<GLfloat> &buffer)
{
    buffer.resize(...);
    for(int n = ...; ...; ...) {
        buffer[n] = ...;
    }
}
and
std::vector<GLfloat> vertices;
initialize_buffer(..., vertices);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertices[0]), &vertices[0], GL_STATIC_DRAW);
much less fuss.
The other problem is that this code seems to be called from the drawing function. The whole point of buffer objects is that you initialize them only one time and then just bind and draw from them in the display routine. So glDrawArrays belongs in a different function from the rest of this code, namely the display routine, while the rest belongs in the data loading and scene management code.
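For illustration, a minimal sketch of that split, with hypothetical scene_init/scene_draw names wrapped around the question's own helper:

static GLuint rect_vbo;                  /* created once, reused every frame */

void scene_init(void) {                  /* data loading, called one time */
    GLfloat *vertexes = CDMeshVertexesCreateRectangle(200, 200);
    glGenBuffers(1, &rect_vbo);
    glBindBuffer(GL_ARRAY_BUFFER, rect_vbo);
    glBufferData(GL_ARRAY_BUFFER, 8 * sizeof(GLfloat), vertexes, GL_STATIC_DRAW);
    free(vertexes);                      /* GL now holds its own copy */
}

void scene_draw(void) {                  /* display routine, called per frame */
    glBindBuffer(GL_ARRAY_BUFFER, rect_vbo);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, 0);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
    glDisableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}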

How do I create a GtkImage from a Cairo surface?

I want to be able to make a GtkImage from a Cairo surface (without writing a temp file).
I currently write the surface as PNG to a char array which I then feed to a PixbufLoader to get a Pixbuf which I use to create the GtkImage:
typedef struct
{
    unsigned char *pos;
    unsigned char *end;
} closure_t;

static cairo_status_t
png_to_array (void *closure, const unsigned char *data, unsigned int length)
{
    closure_t *cl = (closure_t *) closure;
    if ((cl->pos + length) > (cl->end))
        return CAIRO_STATUS_WRITE_ERROR;
    memcpy (cl->pos, data, length);
    cl->pos += length;
    return CAIRO_STATUS_SUCCESS;
}

// later in the code
cairo_surface_t *surface = ...
...
// how would I determine the right size?
unsigned char arr[...];
closure_t cl;
GtkWidget *image;
GdkPixbufLoader *pbloader;
GdkPixbuf *pb;

// copy surface png to arr
cl.pos = arr;
cl.end = arr + sizeof(arr);
cairo_surface_write_to_png_stream (surface,
                                   (cairo_write_func_t) png_to_array,
                                   &cl);
...
// write to pixbufloader, get pixbuf, create image
pbloader = gdk_pixbuf_loader_new();
gdk_pixbuf_loader_write(pbloader, arr, sizeof(arr), NULL);
gdk_pixbuf_loader_close(pbloader, NULL);
pb = gdk_pixbuf_loader_get_pixbuf(pbloader);
image = gtk_image_new_from_pixbuf(pb);
This seems rather cumbersome - isn't there an easier way to do this?
How would I determine the size of the array in my example?
One function will save you a lot of effort here: look up gdk_pixbuf_get_from_surface. It gets a GdkPixbuf from a cairo_surface_t. Of course (realising it as I write), that is only available if you use GDK 3, which also means using GTK+ 3.
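A minimal sketch of that route, assuming surface is a Cairo image surface whose dimensions can be queried:

/* GTK+ 3 / GDK 3: pixbuf straight from the surface, no PNG round trip */
GdkPixbuf *pb = gdk_pixbuf_get_from_surface(surface, 0, 0,
                                            cairo_image_surface_get_width(surface),
                                            cairo_image_surface_get_height(surface));
GtkWidget *image = gtk_image_new_from_pixbuf(pb);
g_object_unref(pb);   /* gtk_image_new_from_pixbuf keeps its own reference */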
Of course, if you want to use GTK+ 2 then you can create a pixmap, get a cairo_t from it, and copy your other cairo_surface_t to it:
cairo_set_source_surface (cr, surface, x0, y0);
cairo_rectangle (cr, x0 + x, y0 + y, width, height);
cairo_fill (cr);
An example of how to create a pixmap is below; I'll let you fill in the rest.
#include <gtk/gtk.h>
#include <cairo/cairo.h>

int main(gint argc, gchar *argv[])
{
    GdkPixmap *pixmap;
    GtkWidget *image;
    GtkWidget *window;
    cairo_t *cr;

    gtk_init(&argc, &argv);
    window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
    g_signal_connect(G_OBJECT(window), "delete-event", G_CALLBACK(gtk_main_quit), NULL);
    gtk_widget_show_all(window);

    pixmap = gdk_pixmap_new(window->window, 100, 100, -1);
    cr = gdk_cairo_create(pixmap);
    cairo_set_source_rgb(cr, 0.0, 0.0, 0.0);
    cairo_rectangle(cr, 10, 10, 80, 80);
    cairo_fill(cr);
    cairo_destroy(cr);
    cr = NULL;

    image = gtk_image_new_from_pixmap(pixmap, NULL);
    gtk_container_add(GTK_CONTAINER(window), image);
    gtk_widget_show(image);

    gtk_main();
    return 0;
}
Just a hint: I'm not sure how to determine the size of the array up front, but since your png_to_array callback will be called several times, the size of the array has to be the sum of all the lengths.
Below I'm using the stride to size the array, but in the end total_length determines the size of the PNG:
int total_length = 0;

typedef struct
{
    unsigned char *pos;
    unsigned char *end;
} closure_t;

static cairo_status_t
png_to_array (void *closure, const unsigned char *data, unsigned int length)
{
    closure_t *cl = (closure_t *) closure;
    if ((cl->pos + length) > (cl->end))
        return CAIRO_STATUS_WRITE_ERROR;
    memcpy (cl->pos, data, length);
    cl->pos += length;
    total_length += length;
    return CAIRO_STATUS_SUCCESS;
}

// later in the code
cairo_surface_t *surface = ...
...
int stride = cairo_image_surface_get_stride(surface);
unsigned char *arr = (unsigned char *) malloc(stride);
closure_t cl;
GtkWidget *image;
GdkPixbufLoader *pbloader;
GdkPixbuf *pb;

// copy surface png to arr
cl.pos = arr;
cl.end = arr + stride;
cairo_surface_write_to_png_stream (surface,
                                   (cairo_write_func_t) png_to_array,
                                   &cl);
...
// write to pixbufloader, get pixbuf, create image
pbloader = gdk_pixbuf_loader_new();
gdk_pixbuf_loader_write(pbloader, arr, total_length, NULL);
gdk_pixbuf_loader_close(pbloader, NULL);
pb = gdk_pixbuf_loader_get_pixbuf(pbloader);
image = gtk_image_new_from_pixbuf(pb);
