OpenGL texture color mapping - C

I would like to efficiently perform a mapping from integers to colors on a buffer sent to the GPU as a texture object.
Currently my code looks like this:
// setup the rendering pipeline and init the texture object
glBindTexture(GL_TEXTURE_2D, texture);
// data is an array of integers ranging from 0 to 3 of length 144 * 160 like this: [0, 1, 2, 3, 3, 2, 1, ..., 1]
glTexImage2D(GL_TEXTURE_2D, 0, /* not sure what to put here */, 160, 144, 0, GL_RGB, /* also not sure what to put here */, data);
I want to be able to map the numbers from 0 to 3 to an RGB value.
I feel like I should be able to do this in a fragment shader, but how should I pass my data into the shader?
My fragment shader looks like this:
#version 330 core
out vec4 FragColor;
in vec2 TexCoord;
uniform sampler2D screen;
void main()
{
/*
* I would really like to be able to get the byte value
* at the texture's coordinate that we are processing and
* use an if ... else if ... else ... block to map the texture
*/
// FragColor = texture(screen, TexCoord);
}
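One possible way to do this (sketched here; not taken from the original post) is to upload the values as an unsigned-integer texture and do the mapping in the fragment shader. The sketch assumes the 0-3 values are repacked as one GLubyte per pixel, and the palette colors are only placeholders:
// C side: one byte per pixel, uploaded as an unsigned integer texture (GL_R8UI)
glBindTexture(GL_TEXTURE_2D, texture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // defensive; a 160-byte row is already a multiple of 4
// integer textures cannot be filtered, so NEAREST is required
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8UI, 160, 144, 0, GL_RED_INTEGER, GL_UNSIGNED_BYTE, data);
The sampler in the shader is then declared as usampler2D, and the 0-3 value is mapped with the kind of if/else chain described above:
#version 330 core
out vec4 FragColor;
in vec2 TexCoord;
uniform usampler2D screen; // unsigned integer texture
void main()
{
uint id = texture(screen, TexCoord).r; // 0, 1, 2 or 3
if (id == 0u) FragColor = vec4(1.00, 1.00, 1.00, 1.0);
else if (id == 1u) FragColor = vec4(0.66, 0.66, 0.66, 1.0);
else if (id == 2u) FragColor = vec4(0.33, 0.33, 0.33, 1.0);
else FragColor = vec4(0.00, 0.00, 0.00, 1.0);
}
The screen uniform still has to be set to the texture unit the texture is bound to (glUniform1i), as with any sampler.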

Related

Object rendering strangely after adding transformations

I'm adding transformations to my C OpenGL program. I'm using CGLM as my maths library. The program compiles with no warnings or errors; however, when I run it, I get a distorted version of my intended image (it was not distorted before adding transformations).
The following is my program's main loop:
// Initialize variables for framerate counting
double lastTime = glfwGetTime();
int frameCount = 0;
// Program loop
while (!glfwWindowShouldClose(window)) {
// Calculate framerate
double thisTime = glfwGetTime();
frameCount++;
// If a second has passed.
if (thisTime - lastTime >= 1.0) {
printf("%i FPS\n", frameCount);
frameCount = 0;
lastTime = thisTime;
}
processInput(window);
// Clear the window
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Bind textures on texture units
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture2);
// Create transformations
mat4 transform = {{1.0f}};
glm_mat4_identity(transform);
glm_translate(transform, (vec3){0.5f, -0.5f, 0.0f});
glm_rotate(transform, (float)glfwGetTime(), (vec3){0.0f, 0.0f, 1.0f});
// Get matrix's uniform location and set matrix
shaderUse(myShaderPtr);
GLint transformLoc = glGetUniformLocation(myShaderPtr->shaderID, "transform");
// mat4 transform;
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, (float*)transform);
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(window); // Swap the front and back buffers
glfwPollEvents(); // Check for events (mouse movement, mouse click, keyboard press, keyboard release etc.)
}
The Program is up on github here if you'd like to check out the full code.
The output of the program is this (the square also rotates):
However, the intended output of the program is the penguin at 20% opacity on top and the box at 100% opacity underneath the penguin.
In the vertex shader, the location of the texture coordinate is 1:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;
However, when you specify the vertex attributes, location 1 is used for the color attribute and location 2 for the texture coordinates:
// Colour attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
// Texture coord attribute
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(2);
Remove the color attribute and use location 1 for the texture coordinates. e.g.:
// Texture coord attribute
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(1);
Looking at your source code, you're passing in three attributes (position, color and texture coordinates), but your vertex shader only takes two.
Removing the color attribute and passing the texture coordinates as attribute #1 instead of #2 should make it render as intended.
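For completeness, the full attribute setup for that 8-floats-per-vertex layout would then look something like this (the position attribute call is assumed here, as it is not shown above):
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
// Texture coord attribute (the three colour floats at offset 3 are simply skipped over by the stride)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(1);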

OpenGL ES 2.0 glDrawElements odd behavior

In my application (running on a Mali-400 GPU) I am using OpenGL ES 2.0 to draw the UI. For that I set up an orthographic projection matrix and use 2D vectors to describe the geometry (only x, y attributes).
So my vertex structure looks something like this:
struct my_vertext {
    struct vec2 pos;
    unsigned int color;
};
Later I prepare the GL context and set up the shaders:
vertex:
uniform mat4 matrix;
attribute vec2 pos;
attribute vec4 color;
varying vec4 frag_color;
void main() {
frag_color = color;
gl_Position = matrix * vec4(pos.xy, 0, 1);
};
fragment:
precision mediump float;
varying vec4 frag_color;
void main() {
gl_FragColor = frag_color;
};
and bind them with attribute arrays:
GLuint prog = glCreateProgram(); CHECK_GL;
// create shaders, load source and compile them
...
/// binding with attributes
GLuint attrib_pos, attrib_col, vertex_index = 0;
glBindAttribLocation(prog, vertex_index, "pos"); CHECK_GL;
attrib_pos = vertex_index++;
glBindAttribLocation(prog, vertex_index, "color"); CHECK_GL;
attrib_col = vertex_index++;
// link program
glLinkProgram(prog); CHECK_GL;
glUseProgram(prog); CHECK_GL;
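(The attribute array setup itself is not shown above; for a struct like my_vertext it would typically look something along these lines, sketched with an illustrative buffer name rather than the project's actual code:)
glBindBuffer(GL_ARRAY_BUFFER, vbo); CHECK_GL; // vbo = whatever buffer holds the vertices; offsetof comes from <stddef.h>
glEnableVertexAttribArray(attrib_pos); CHECK_GL;
glEnableVertexAttribArray(attrib_col); CHECK_GL;
// two floats for the position
glVertexAttribPointer(attrib_pos, 2, GL_FLOAT, GL_FALSE,
        sizeof(struct my_vertext), (void *) offsetof(struct my_vertext, pos)); CHECK_GL;
// four unsigned bytes, normalized to 0..1, feeding the vec4 "color" attribute
glVertexAttribPointer(attrib_col, 4, GL_UNSIGNED_BYTE, GL_TRUE,
        sizeof(struct my_vertext), (void *) offsetof(struct my_vertext, color)); CHECK_GL;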
When I render my geometry (for simplicity I am drawing 2 rectangles, one behind the other), only the first call to glDrawElements produces an image on the screen (the dark gray rectangle); the second (red) doesn't.
For rendering I am using a Vertex Array Object with 2 bound buffers: one for the geometry (GL_ARRAY_BUFFER) and one for the indices (GL_ELEMENT_ARRAY_BUFFER). All the geometry is placed in these buffers and later drawn with glDrawElements calls providing the needed offsets.
My drawing code:
glBindVertexArray(vao); CHECK_GL;
...
GLuint *offset = 0;
for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, offset); CHECK_GL;
offset += ui_object->elem_count;
}
This puzzles me a lot since I check each and every return code of the glXXX functions and all of them return GL_NO_ERROR. Additionally, I ran my program in the Mali Graphics Debugger and it didn't reveal any problems/errors.
Geometry and indices for both calls (obtained from Mali Graphics Debugger):
First rectangle geometry (which is shown on screen):
0 Position=[30.5, 30.5] Color=[45, 45, 45, 255]
1 Position=[1250.5, 30.5] Color=[45, 45, 45, 255]
2 Position=[1250.5, 690.5] Color=[45, 45, 45, 255]
3 Position=[30.5, 690.5] Color=[45, 45, 45, 255]
Indices: [0, 1, 2, 0, 2, 3]
Second rectangle geometry (which isn't shown on screen):
4 Position=[130.5, 130.5] Color=[255, 0, 0, 255]
5 Position=[230.5, 130.5] Color=[255, 0, 0, 255]
6 Position=[230.5, 230.5] Color=[255, 0, 0, 255]
7 Position=[130.5, 230.5] Color=[255, 0, 0, 255]
Indices: [4, 5, 6, 4, 6, 7]
P.S.: On my desktop everything works perfectly. I suppose it has something to do with limitations/peculiarities of embedded OpenGL.
On my desktop:
$ inxi -F
...
GLX Renderer: Mesa DRI Intel Ivybridge Desktop GLX Version: 3.0 Mesa 10.1.3
I know very little of OpenGL ES, but this part looks wrong:
glBindVertexArray(vao); CHECK_GL;
...
GLuint *offset = 0;
for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, offset); CHECK_GL;
offset += ui_object->elem_count;
}
compared to
glBindVertexArray(vao); CHECK_GL;
...
GLuint offset = 0;
for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, &offset); CHECK_GL;
offset += ui_object->elem_count;
}
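For reference, when a GL_ELEMENT_ARRAY_BUFFER is bound, the last argument of glDrawElements is interpreted as a byte offset into that buffer, so with GL_UNSIGNED_SHORT indices the offset arithmetic might look like this (a sketch):
// offset advances in bytes: 2 bytes per GL_UNSIGNED_SHORT index
size_t offset_bytes = 0;
// for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, (const GLvoid *)offset_bytes); CHECK_GL;
offset_bytes += ui_object->elem_count * sizeof(GLushort);
}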

Texture does not rotate with OpenGL (C)

I am trying to rotate a texture extracted from a video frame (provided by ffmpeg). I have tried the following code:
glTexSubImage2D(GL_TEXTURE_2D,
0,
0,
0,
textureWidth,
textureHeight,
GL_RGBA,
GL_UNSIGNED_BYTE,
//s_pixels);
pFrameConverted->data[0]);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glTranslatef(0.5,0.5,0.0);
glRotatef(90,0.0,0.0,1.0);
glTranslatef(-0.5,-0.5,0.0);
glMatrixMode(GL_MODELVIEW);
//glDrawTexiOES(-dPaddingX, -dPaddingY, 0, drawWidth + 2 * dPaddingX, drawHeight + 2 * dPaddingY);
glDrawTexiOES(0, 0, 0, drawWidth, drawHeight);
The image is not rotated. Do you see the problem?
From the GL_OES_draw_texture extension specification:
Note also that s, t, r, and q are computed for each fragment as part of DrawTex rendering. This implies that the texture matrix is ignored and has no effect on the rendered result.
You are trying to transform the texture coordinates using the fixed-function texture matrix, but like point sprites, those coordinates are generated per-fragment rather than per-vertex. That means nothing you do to the texture matrix is ever going to affect the output of glDrawTexiOES(...).
Consider using a textured quad instead, those will pass through the traditional vertex processing pipeline.
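For example, a minimal fixed-function sketch of such a quad (client arrays; the coordinates are illustrative and assume an orthographic projection in window coordinates is already set up). Because the quad goes through the normal vertex pipeline, the texture matrix set above now actually applies:
GLfloat verts[] = {
0.0f, 0.0f,
(GLfloat)drawWidth, 0.0f,
0.0f, (GLfloat)drawHeight,
(GLfloat)drawWidth, (GLfloat)drawHeight,
};
GLfloat texcoords[] = {
0.0f, 0.0f,
1.0f, 0.0f,
0.0f, 1.0f,
1.0f, 1.0f,
};
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, verts);
glTexCoordPointer(2, GL_FLOAT, 0, texcoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);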

Why is the third vertex of my first triangle always at the origin and the rest of my triangles not being drawn?

I've checked the results of everything and I've tidied up multiple bugs in my draw function already, but I still can't find the reason for the behavior described in the question title. I'm using OpenGL 1.4, 3D textures, vertex arrays, texture coordinate arrays, and glDrawArrays to draw models (from my model API) with textures (from my texture API) to the screen. From looking at the results of everything (printfs), I've concluded the problem has to be in the block of code that actually draws everything, and not in my code that fills these arrays with post-animation vertex data (so I'm only posting the former to save on bloating this post).
The current color is used to achieve a brightness effect for the current window. The variable msindex is already set to the number of model draw specifications before the featured loop begins. Vertex data and texture coordinate data for every model being drawn are all stuffed into one segment, and as you can see below there are glVertexPointer and glTexCoordPointer calls on different parts of the start of it to register this data. The contents of this segment are tightly packed, with three floats for the position of a vertex first and then three floats for its texture coordinates. There is multitexturing (up to two textures specified much earlier in the model), but both textures share the same texture coordinates (which is why both calls to glTexCoordPointer specify the same location in memory).
The while loop is meant to draw each individual specified model according to the information in its model draw specification in the miaptr segment. Start is, in my code, the starting 6-float-wide index into the overall vertex data segment for the first vertex of the model to be drawn, and count is the number of vertices. In my example case these are just 0 for start and 6 for count (an attempt to draw one model with two triangles). Type can be multiple things depending on the model, but in this case it is GL_TRIANGLES; I've tried other primitive types, but they all suffer from the same problem. Additionally, the texture being drawn is entirely opaque (and green), the brightness of the target window is always 1, and all the primitives are front facing.
The following is my broken source code:
/* Enable/set global things. */
jgl.Viewport(
(GLint) x, (GLint) y, (GLsizei) width, (GLsizei) height
);
fvals[0] = (jWindowsptr + jGetCurrentWindow())->brightness;
jgl.Color4f(
(GLfloat) fvals[0],
(GLfloat) fvals[0],
(GLfloat) fvals[0],
1
);
jgl.Enable(GL_ALPHA_TEST);
jgl.Enable(GL_CULL_FACE);
jgl.CullFace(GL_BACK);
jgl.Enable(GL_DEPTH_TEST);
jgl.Enable(GL_POINT_SPRITE_ARB);
jgl.EnableClientState(GL_VERTEX_ARRAY);
const GLvoid *vaptrc = vaptr;
jgl.VertexPointer(3, GL_FLOAT, 12, vaptrc);
/* Color clearing is in here so I could see better while testing. */
jgl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/* Enable/set per texture unit things. */
jgl.ActiveTexture(GL_TEXTURE0 + 1);
jgl.TexEnvi(
GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE
);
jgl.ClientActiveTexture(GL_TEXTURE0 + 1);
jgl.EnableClientState(GL_TEXTURE_COORD_ARRAY);
jgl.TexCoordPointer(3, GL_FLOAT, 12, (vaptrc + 3));
jgl.Enable(GL_TEXTURE_3D);
jgl.ActiveTexture(GL_TEXTURE0);
jgl.TexEnvi(
GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE
);
jgl.ClientActiveTexture(GL_TEXTURE0);
jgl.EnableClientState(GL_TEXTURE_COORD_ARRAY);
jgl.TexCoordPointer(3, GL_FLOAT, 12, (vaptrc + 3));
jgl.Enable(GL_TEXTURE_3D);
/* Pass #1. */
jgl.MatrixMode(GL_TEXTURE);
jgl.DepthFunc(GL_LESS);
jgl.AlphaFunc(GL_EQUAL, 1);
const GLfloat *tctm;
while (msindex > 0) {
msindex = msindex - 1;
jgl.ActiveTexture(GL_TEXTURE0);
jgl.BindTexture(
GL_TEXTURE_3D, (miaptr + msindex)->textureids[0]
);
if ((miaptr + msindex)->textureids[0] != 0) {
tctm
= (miaptr
+ msindex)->transformationmatrices[0];
jgl.LoadMatrixf(tctm);
}
jgl.ActiveTexture(GL_TEXTURE0 + 1);
jgl.BindTexture(
GL_TEXTURE_3D, (miaptr + msindex)->textureids[1]
);
if ((miaptr + msindex)->textureids[1] != 0) {
tctm
= (miaptr
+ msindex)->transformationmatrices[1];
jgl.LoadMatrixf(tctm);
}
jgl.DrawArrays(
(miaptr + msindex)->type,
(GLint) (miaptr + msindex)->start,
(GLsizei) (miaptr + msindex)->count
);
}
/* WIP */
/* Disable per texture unit things. */
jgl.ActiveTexture(GL_TEXTURE0 + 1);
jgl.ClientActiveTexture(GL_TEXTURE0 + 1);
jgl.DisableClientState(GL_TEXTURE_COORD_ARRAY);
jgl.Disable(GL_TEXTURE_3D);
jgl.ActiveTexture(GL_TEXTURE0);
jgl.ClientActiveTexture(GL_TEXTURE0);
jgl.DisableClientState(GL_TEXTURE_COORD_ARRAY);
jgl.Disable(GL_TEXTURE_3D);
/* WIP */
/* Disable global things. */
jgl.DisableClientState(GL_VERTEX_ARRAY);
jgl.Disable(GL_POINT_SPRITE_ARB);
jgl.Disable(GL_DEPTH_TEST);
jgl.Disable(GL_CULL_FACE);
jgl.Disable(GL_ALPHA_TEST);
Your description says that you have interleaved vertex attributes, with 3 floats for the position and 3 floats for the texture coordinates per vertex. This also matches the code you posted.
The values you pass as stride to glVertexPointer() and glTexCoordPointer() do not match this, though. With 6 floats (3 for position + 3 for texture coordinates) per vertex, and a float being 4 bytes, the stride should be 6 * 4 = 24. So all of these calls need to use 24 for the stride:
jgl.VertexPointer(3, GL_FLOAT, 24, vaptrc);
jgl.TexCoordPointer(3, GL_FLOAT, 24, ...);
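The same calls can also be written with sizeof, so that both the stride and the texture coordinate offset are expressed in the right units (a sketch, assuming vaptrc points at the tightly packed data described in the question):
const GLfloat *verts = (const GLfloat *) vaptrc;
const GLsizei stride = 6 * sizeof(GLfloat); // 24 bytes per vertex
jgl.VertexPointer(3, GL_FLOAT, stride, verts); // positions at offset 0
jgl.TexCoordPointer(3, GL_FLOAT, stride, verts + 3); // texcoords start 3 floats (12 bytes) in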

Advice on an algorithm for rendering a bitmap pixel by pixel

I've been working on a bitmap loader, with the main goal to do nothing more than parse the data properly and render it in OpenGL. I'm at the point where I need to draw the pixels on an x/y (i.e., pixel by pixel) basis (at least, this is what I think I need to do as far as rendering is concerned). I've already bound the texture object and called glTexImage2D(...).
Currently, what I'm having trouble with is the pixel by pixel algorithm.
As far as I understand it, bitmap (aka DIB) files store color data in what is known as the pixel array. Each row of pixels consists of some number of bytes, with each pixel occupying either 4 bytes (32 bits per pixel), 3 bytes (24 bits per pixel), 2 bytes (16 bits per pixel), or 1 byte (8 bits per pixel).
I think I need to loop through the pixels while calculating the right offset within the pixel array, which is relative to the pixel's x/y coordinate. Is this true, though? If not, what should I do? I'm honestly slightly confused as to whether, despite doing what was suggested in this question I asked some time ago, this approach is correct.
I assume that going about it on a pixel-by-pixel basis is the right approach, mainly because rendering a quad with glVertex* and glTexCoord* produced nothing more than a grayed-out rectangle (at the time I thought OpenGL would handle this by itself, which is why I attempted that in the first place).
I should also note that, while my question displays OpenGL 3.1 shaders, I moved to SDL 1.2
so I could just use immediate mode for the time being until I got the right algorithms implemented, and then switch back to modern GL.
The test image I'm parsing:
Its data output (pastebinned due to its very long length):
http://pastebin.com/6RVhAVRq
And The Code:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
glBegin( GL_POINTS );
{
const unsigned width = data->img_data->width + ( unsigned int ) center[ VEC_X ];
const unsigned height = data->img_data->height + ( unsigned int ) center[ VEC_Y ];
const unsigned bytecount = GetByteCount( data->img_data->bpp );
const unsigned char* pixels = data->img_data->pixels;
unsigned color_offset = 0;
unsigned x_pixel;
for ( x_pixel = center[ VEC_X ]; x_pixel < width; ++x_pixel )
{
unsigned y_pixel;
for ( y_pixel = center[ VEC_Y ]; y_pixel < height; ++y_pixel )
{
}
const bool do_color_update = true; //<--- replace true with a condition which checks to see if the color needs to be updated.
if ( do_color_update )
{
glColor3fv( pixels + color_offset );
}
color_offset += bytecount;
}
}
glEnd();
glBindTexture( GL_TEXTURE_2D, 0 );
}
You're completely missing the point of an OpenGL texture in your code. The texture holds the image for you and the rasterizer does all the iterations over the pixel data for you. No need to write a slow pixel-pusher loop yourself.
As your code stands right now that texture is completely bogus and does nothing. You could completely omit the calls to glBindTexture and it'd still work – or not, because you're not actually drawing anything, you just set the glColor state. To draw something you'd have to call glVertex.
So why not leverage the pixel-pushing performance of modern GPUs and actually use a texture? How about this:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
if( 0 == data->texbuf_id ) {
glGenTextures(1, &(data->texbuf_id));
glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// there are a few more, but the defaults are ok
// if you didn't change them no need for further unpack settings
GLenum internal_format;
GLenum format;
GLenum type;
switch(data->img_data->bpp) {
case 8:
// this could be a palette or grayscale
internal_format = GL_LUMINANCE8;
format = GL_LUMINANCE;
type = GL_UNSIGNED_BYTE;
break;
case 15:
internal_format = GL_RGB5;
format = GL_BGRA; // 15 bpp BMPs store X1R5G5B5; BGRA with the _REV type matches that layout
type = GL_UNSIGNED_SHORT_1_5_5_5_REV;
break;
case 16:
internal_format = GL_RGB8;
format = GL_BGR; // BMP files have BGR pixel order
type = GL_UNSIGNED_SHORT_5_6_5;
break;
case 24:
internal_format = GL_RGB8;
format = GL_BGR; // BMP files have BGR pixel order
type = GL_UNSIGNED_BYTE;
break;
case 32:
internal_format = GL_RGB8;
format = GL_BGRA; // BMP files store 32 bpp pixels in BGRX byte order
type = GL_UNSIGNED_INT_8_8_8_8_REV;
break;
default:
return; // unsupported bit depth
}
glTexImage2D( GL_TEXTURE_2D, 0, internal_format,
data->img_data->width, data->img_data->height, 0,
format, type, data->img_data->pixels );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
} else {
glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
}
static GLfloat verts[] = {
0, 0,
1, 0,
1, 1,
0, 1
};
// the following is to address texture image pixel centers
// tex coordinates 0 and 1 are not on pixel centers!
float const tex_width = data->img_data->width;
float const tex_height = data->img_data->height;
float const s0 = 1. / (2.*tex_width);
float const s1 = ( 2.*(tex_width-1) + 1.) / (2.*tex_width);
float const t0 = 1. / (2.*tex_height);
float const t1 = ( 2.*(tex_height-1) + 1.) / (2.*tex_height);
GLfloat texcoords[] = {
s0, t0,
s1, t0,
s1, t1,
s0, t1
};
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glVertexPointer(2, GL_FLOAT, 0, verts);
glTexCoordPointer(2, GL_FLOAT, 0, texcoords);
glColor4f(1., 1., 1., 1.);
glDrawArrays(GL_QUADS, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glBindTexture( GL_TEXTURE_2D, 0 );
}
Your intuition is basically correct. The pixels are stored as an array of bytes, but the bytes are arranged into consecutive groups, with each group representing a single pixel. To address a single pixel, you'll need to do a calculation like this:
unsigned char* image_data = start_of_pixel_data;
unsigned char* pixel_addr = image_data + bytes_per_pixel * (y * width_in_pixels + x);
Be careful about the width in pixels, as sometimes there is padding at the end of the row to bring the total row width in bytes up to a multiple of 4/8/16/32/64/etc. I recommend looking at the actual bytes of the bitmap in hex first to get a sense of what is going on. It's a great learning exercise and will give you high confidence in your pixel-walking code, which is what you want. You might be able to use a debugger to do this, or else write a simple loop with printf over the image bytes.
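For a classic Windows BMP, each row is padded up to a multiple of 4 bytes, so the row stride should be computed from the padded width. A short sketch with illustrative variable names (bits_per_pixel and width_in_pixels coming from the BMP header):
unsigned bytes_per_pixel = bits_per_pixel / 8;
unsigned row_stride = (width_in_pixels * bytes_per_pixel + 3) & ~3u; // round up to a multiple of 4
unsigned char* pixel_addr = image_data + y * row_stride + x * bytes_per_pixel;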
