In my application (running on Mali-400 GPU) I am using OpenGL ES 2.0 to draw UI. For that I setup orthogonal projection matrix and use 2D vectors to describe the geometry (only x, y attributes).
So my vertex structure looks something like this:
struct my_vertex {
struct vec2 pos;
unsigned int color;
};
later then I prepare GL context, setup shaders:
vertex:
uniform mat4 matrix;
attribute vec2 pos;
attribute vec4 color;
varying vec4 frag_color;
void main() {
frag_color = color;
gl_Position = matrix * vec4(pos.xy, 0, 1);
};
fragment:
precision mediump float;
varying vec4 frag_color;
void main() {
gl_FragColor = frag_color;
};
and bind them with attribute arrays:
GLuint prog = glCreateProgram(); CHECK_GL;
// create shaders, load source and compile them
...
/// binding with attributes
GLuint attrib_pos, attrib_col, vertex_index = 0;
glBindAttribLocation(prog, vertex_index, "pos"); CHECK_GL;
attrib_pos = vertex_index++;
glBindAttribLocation(prog, vertex_index, "color"); CHECK_GL;
attrib_col = vertex_index++;
// link program
glLinkProgram(prog); CHECK_GL;
glUseProgram(prog); CHECK_GL;
When I render my geometry (for simplicity I am drawing 2 rectangles one behind another) - only the first call to glDrawElements produces an image on the screen (dark gray rectangle), the second (red) - doesn't.
For rendering I am using Vertex Array Object with 2 bound buffers - one for geometry (GL_ARRAY_BUFFER) and the second one for indices (GL_ELEMENT_ARRAY_BUFFER). All the geometry is placed to this buffers and later is drawn with glDrawElements calls providing needed offsets.
My drawing code:
glBindVertexArray(vao); CHECK_GL;
...
GLuint *offset = 0;
for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, offset); CHECK_GL;
offset += ui_object->elem_count;
}
This puzzles me a lot since I check each and every return code of glXXX functions and all of them return GL_NO_ERROR. Additionally, I ran my program in Mali Graphics Debugger and the latter hasn't revealed any problems/errors.
Geometry and indices for both calls (obtained from Mali Graphics Debugger):
First rectangle geometry (which is shown on screen):
0 Position=[30.5, 30.5] Color=[45, 45, 45, 255]
1 Position=[1250.5, 30.5] Color=[45, 45, 45, 255]
2 Position=[1250.5, 690.5] Color=[45, 45, 45, 255]
3 Position=[30.5, 690.5] Color=[45, 45, 45, 255]
Indices: [0, 1, 2, 0, 2, 3]
Second rectangle geometry (which isn't shown on screen):
4 Position=[130.5, 130.5] Color=[255, 0, 0, 255]
5 Position=[230.5, 130.5] Color=[255, 0, 0, 255]
6 Position=[230.5, 230.5] Color=[255, 0, 0, 255]
7 Position=[130.5, 230.5] Color=[255, 0, 0, 255]
Indices: [4, 5, 6, 4, 6, 7]
P.S.: On my desktop everything works perfectly. I suppose it has something to do with limitations/peculiarities of OpenGL ES.
On my desktop:
$ inxi -F
$ ....
$ GLX Renderer: Mesa DRI Intel Ivybridge Desktop GLX Version: 3.0 Mesa 10.1.3
I know very little of OpenGL ES, but this part looks wrong:
glBindVertexArray(vao); CHECK_GL;
...
GLuint *offset = 0;
for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, offset); CHECK_GL;
offset += ui_object->elem_count;
}
compared to
glBindVertexArray(vao); CHECK_GL;
...
GLuint offset = 0;
for each UI object:
{
glDrawElements(GL_TRIANGLES, (GLsizei)ui_object->elem_count,
GL_UNSIGNED_SHORT, &offset); CHECK_GL;
offset += ui_object->elem_count;
}
The key difference: in the first version offset is a GLuint *, so offset += elem_count advances it by elem_count * sizeof(GLuint) — 4 bytes per index — while the indices stored in the GL_ELEMENT_ARRAY_BUFFER are 2-byte GL_UNSIGNED_SHORT values. Every draw call after the first therefore reads its indices from twice the intended byte offset.
Related
I'm adding transformations to my C OpenGL program. I'm using CGLM as my maths library. The program has no warnings or errors. Still however, when I compile and run the program, I get a distorted version of my intended image (it was not distorted before adding transformations).
The following is my program's main loop:
// Initialize variables for framerate counting
double lastTime = glfwGetTime();
int frameCount = 0;
// Program loop
while (!glfwWindowShouldClose(window)) {
// Calculate framerate
double thisTime = glfwGetTime();
frameCount++;
// If a second has passed.
if (thisTime - lastTime >= 1.0) {
printf("%i FPS\n", frameCount);
frameCount = 0;
lastTime = thisTime;
}
processInput(window);
// Clear the window
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Bind textures on texture units
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture2);
// Create transformations
mat4 transform = {{1.0f}};
glm_mat4_identity(transform);
glm_translate(transform, (vec3){0.5f, -0.5f, 0.0f});
glm_rotate(transform, (float)glfwGetTime(), (vec3){0.0f, 0.0f, 1.0f});
// Get matrix's uniform location and set matrix
shaderUse(myShaderPtr);
GLint transformLoc = glGetUniformLocation(myShaderPtr->shaderID, "transform");
// mat4 transform;
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, (float*)transform);
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(window); // Swap the front and back buffers
glfwPollEvents(); // Check for events (mouse movement, mouse click, keyboard press, keyboard release etc.)
}
The Program is up on github here if you'd like to check out the full code.
The Output of the program is this (The square also rotates):
However, the intended output of the program is the penguin at 20% opacity on top and the box at 100% opacity underneath the penguin.
In the vertex shader, the location of the texture coordinate is 1:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;
However, when you specify the vertices, location 1 is used for the color attribute and position 2 for the text coordinates:
// Colour attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
// Texture coord attribute
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(2);
Remove the color attribute and use location 1 for the texture coordinates. e.g.:
// Texture coord attribute
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(1);
Looking at your source code, you're passing in three attributes (position, color and texture coordinates), but your vertex shader only takes two.
Removing the color attribute and instead passing the texture coordinates as attribute #1 instead of #2 should make it look like intended.
I would like to efficiently perform a mapping from integers to colors on a buffer sent to the GPU as a texture object.
Currently my code looks like this:
// setup the rendering pipeline and init the texture object
glBindTexture(GL_TEXTURE_2D, texture);
// data is an array of integers ranging from 0 to 3 of length 144 * 160 like this: [0, 1, 2, 3, 3, 2, 1, ..., 1]
glTexImage2D(GL_TEXTURE_2D, 0, /* not sure what to put here */, 160, 144, 0, GL_RGB, /* also not sure what to put here */, data);
I want to be able to map the numbers from 0 to 3 into a RGB value.
I feel like I should be able to do this in a fragment shader, but how should I pass my data into the shader?
My fragment shader looks like this:
#version 330 core
out vec4 FragColor;
in vec2 TexCoord;
uniform sampler2D screen;
void main()
{
/*
* I would really like to be able to get the byte value
* at the texture's coordinate that we are processing and
* use an if ... else if ... else ... block to map the texture
*/
// FragColor = texture(screen, TexCoord);
}
I am trying to add textures to a cylinder to draw a stone well. I'm starting with a cylinder and then mapping a stone texture I found here but am getting some weird results. Here is the function I am using:
void draw_well(double x, double y, double z,
double dx, double dy, double dz,
double th)
{
// Set specular color to white
float white[] = {1,1,1,1};
float black[] = {0,0,0,1};
glMaterialfv(GL_FRONT_AND_BACK,GL_SHININESS,shinyvec);
glMaterialfv(GL_FRONT_AND_BACK,GL_SPECULAR,white);
glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,black);
glPushMatrix();
// Offset
glTranslated(x,y,z);
glRotated(th,0,1,0);
glScaled(dx,dy,dz);
// Enable textures
glEnable(GL_TEXTURE_2D);
glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_MODULATE);
glBindTexture(GL_TEXTURE_2D,texture[0]); // Stone texture
glBegin(GL_QUAD_STRIP);
for (int i = 0; i <= 359; i++)
{
glNormal3d(Cos(i), 1, Sin(i));
glTexCoord2f(0,0); glVertex3f(Cos(i), -1, Sin(i));
glTexCoord2f(0,1); glVertex3f(Cos(i), 1, Sin(i));
glTexCoord2f(1,1); glVertex3f(Cos(i + 1), 1, Sin(i + 1));
glTexCoord2f(1,0); glVertex3f(Cos(i + 1), -1, Sin(i + 1));
}
glEnd();
glPopMatrix();
glDisable(GL_TEXTURE_2D);
}
// Later down in the display function
draw_well(0, 0, 0, 1, 1, 1, 0);
and the output I receive is
I'm still pretty new to OpenGL and more specifically textures so my understanding is pretty limited. My thought process here is that I would map the texture to each QUAD used to make the cylinder, but clearly I am doing something wrong. Any explanation on what is causing this weird output and how to fix it would be greatly appreciated.
There are possibly three main issues with your draw routine. quad-strip indexing, texture coordinates repeating too often and possible incorrect usage of the trig functions;
Trigonometric functions usually accept values which represent angles expressed in radians and not degrees. Double check what the parameters of the Sin and Cos functions you are using.
Quadstrip indexing is incorrect. Indexing should go like this...
Notice how the quad is defined in a clock-wise fashion, however the diagonal vertices are defined sequentially. You are defining the quad as v0, v1, v3, v2 instead of v0, v1, v2, v3 so swap the last two vertices of the four. This also leads to another error in not sharing the vertices correctly. You are duplicating them along each vertical edge since you draw the same set of vertices (i+1) in one loop, as you do in the next (i.e since i has now been incremented by 1).
Texture coordinates are in the range from 0, 1 for each quad which means you are defining a cylinder which is segmented 360 times and this texture is repeated 360 times around the cylinder. I'm assuming the texture should be mapped 1:1 to the Cylinder and not repeated?
Here is some example code using what you provided. I have reduced the number of segments down to 64, if you wish to still have 360 then amend numberOfSegments accordingly.
float pi = 3.141592654f;
unsigned int numberOfSegments = 64;
float angleIncrement = (2.0f * pi) / static_cast<float>(numberOfSegments);
float textureCoordinateIncrement = 1.0f / static_cast<float>(numberOfSegments);
glBegin(GL_QUAD_STRIP);
for (unsigned int i = 0; i <= numberOfSegments; ++i)
{
float c = cos(angleIncrement * i);
float s = sin(angleIncrement * i);
glTexCoord2f( textureCoordinateIncrement * i, 0); glVertex3f( c, -1.0f, s);
glTexCoord2f( textureCoordinateIncrement * i, 1.0f); glVertex3f( c, 1.0f, s);
}
glEnd();
N.B. You are using an old version of OpenGL (the use of glBegin/glVertex etc).
I am trying to rotate a texture extracted from a video frame (provided by ffmpeg), I have tried the following code :
glTexSubImage2D(GL_TEXTURE_2D,
0,
0,
0,
textureWidth,
textureHeight,
GL_RGBA,
GL_UNSIGNED_BYTE,
//s_pixels);
pFrameConverted->data[0]);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glTranslatef(0.5,0.5,0.0);
glRotatef(90,0.0,0.0,1.0);
glTranslatef(-0.5,-0.5,0.0);
glMatrixMode(GL_MODELVIEW);
//glDrawTexiOES(-dPaddingX, -dPaddingY, 0, drawWidth + 2 * dPaddingX, drawHeight + 2 * dPaddingY);
glDrawTexiOES(0, 0, 0, drawWidth, drawHeight);
The image is not rotated, do you see the problem?
From the GL_OES_draw_texture extension specification:
Note also that s, t, r, and q are computed for each fragment as part of DrawTex rendering. This implies that the texture matrix is ignored and has no effect on the rendered result.
You are trying to transform the texture coordinates using the fixed-function texture matrix, but like point sprites, those coordinates are generated per-fragment rather than per-vertex. Thus, that means that nothing you do to the texture matrix is ever going to affect the output of glDrawTexiOES (...).
Consider using a textured quad instead, those will pass through the traditional vertex processing pipeline.
I've been working on a bitmap loader, with the main goal to do nothing more than parse the data properly and render it in OpenGL. I'm at the point where I need to draw the pixels on an x/y (i.e., pixel by pixel) basis (at least, this is what I think I need to do as far as rendering is concerned). I've already bound the texture object and called glTexImage2D(...).
Currently, what I'm having trouble with is the pixel by pixel algorithm.
As far as I understand it, bitmap (aka DIB) files store color data in what is known as the pixel array. Each row of pixels consists of x amount of bytes, with each pixel holding a byte count divisible either by 4 ( 32 bits per pixel ), 3 ( 24 bits per pixel ), 2 ( 16 bits per pixel ), or 1 ( 8 bits per pixel ).
I think I need to loop through the pixels while at the same time calculating the right offset within the pixel array, which is relative to its pixel x/y coordinate. Is this true, though? If not, what should I do? I'm honestly slightly confused as to whether or not, despite doing what was directed to me in this question I asked sometime ago, this approach is correct.
I assume that going about it on a pixel by pixel basis is the right approach, mainly because
rendering a quad with glVertex* and glTexCoord* produced nothing more than a grayed out rectangle (at the time I thought the OpenGL would handle this by itself, hence why attempting that in the first place).
I should also note that, while my question displays OpenGL 3.1 shaders, I moved to SDL 1.2
so I could just use immediate mode for the time being until I got the right algorithms implemented, and then switch back to modern GL.
The test image I'm parsing:
It's data output (pastebinned due to its very long length):
http://pastebin.com/6RVhAVRq
And The Code:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
glBegin( GL_POINTS );
{
const unsigned width = data->img_data->width + ( unsigned int ) center[ VEC_X ];
const unsigned height = data->img_data->height + ( unsigned int ) center[ VEC_Y ];
const unsigned bytecount = GetByteCount( data->img_data->bpp );
const unsigned char* pixels = data->img_data->pixels;
unsigned color_offset = 0;
unsigned x_pixel;
for ( x_pixel = center[ VEC_X ]; x_pixel < width; ++x_pixel )
{
unsigned y_pixel;
for ( y_pixel = center[ VEC_Y ]; y_pixel < height; ++y_pixel )
{
}
const bool do_color_update = true; //<--- replace true with a condition which checks to see if the color needs to be updated.
if ( do_color_update )
{
glColor3fv( pixels + color_offset );
}
color_offset += bytecount;
}
}
glEnd();
glBindTexture( GL_TEXTURE_2D, 0 );
}
You're completely missing the point of a OpenGL texture in your code. The texture holds the image for you and the rasterizer does all the iterations over the pixel data for you. No need to write a slow pixel-pusher loop yourself.
As your code stands right now that texture is completely bogus and does nothing. You could completely omit the calls to glBindTexture and it'd still work – or not, because you're not actually drawing anything, you just set the glColor state. To draw something you'd have to call glVertex.
So why not leverage the pixel-pushing performance of modern GPUs and actually use a texture? How about this:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
if( 0 == data->texbuf_id ) {
glGenTextures(1, &(data->texbuf_id));
glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// there are a few more, but the defaults are ok
// if you didn't change them no need for further unpack settings
GLenum internal_format;
GLenum format;
GLenum type;
switch(data->img_data->bpp) {
case 8:
// this could be a palette or grayscale
internal_format = GL_LUMINANCE8;
format = GL_LUMINANCE;
type = GL_UNSIGNED_BYTE;
break;
case 15:
internal_format = GL_RGB5;
format = GL_BGRA; // 15bpp BMPs are stored as X1R5G5B5
type = GL_UNSIGNED_SHORT_1_5_5_5_REV;
break;
case 16:
internal_format = GL_RGB8;
format = GL_RGB; // 5-6-5 packs red into the high bits, matching BMP's R5G6B5
type = GL_UNSIGNED_SHORT_5_6_5;
break;
case 24:
internal_format = GL_RGB8;
format = GL_BGR; // BMP files have BGR pixel order
type = GL_UNSIGNED_BYTE;
break;
case 32:
internal_format = GL_RGBA8;
format = GL_BGRA; // packed 4-component types require a 4-component format
type = GL_UNSIGNED_INT_8_8_8_8_REV;
break;
}
glTexImage2D( GL_TEXTURE_2D, 0, internal_format,
data->img_data->width, data->img_data->height, 0,
format, type, data->img_data->pixels );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
} else {
glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
}
static GLfloat verts[] = {
0, 0,
1, 0,
1, 1,
0, 1
};
// the following is to address texture image pixel centers
// tex coordinates 0 and 1 are not on pixel centers!
float const s0 = 1. / (2.*tex_width);
float const s1 = ( 2.*(tex_width-1) + 1.) / (2.*tex_width);
float const t0 = 1. / (2.*tex_height);
float const t1 = ( 2.*(tex_height-1) + 1.) / (2.*tex_height);
GLfloat texcoords[] = {
s0, t0,
s1, t0,
s1, t1,
s0, t1
};
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glVertexPointer(2, GL_FLOAT, 0, verts);
glTexCoordPointer(2, GL_FLOAT, 0, texcoords);
glColor4f(1., 1., 1., 1.);
glDrawArrays(GL_QUADS, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glBindTexture( GL_TEXTURE_2D, 0 );
}
Your intuition is basically correct. The pixels are stored as an array of bytes, but the bytes are arranged into consecutive groups, with each group representing a single pixel. To address a single pixel, you'll need to do a calculation like this:
unsigned char* image_data = start_of_pixel_data;
unsigned char* pixel_addr = image_data + bytes_per_pixel * (y * width_in_pixels + x);
Be careful about the width in pixels, as sometimes there is padding at the end of the row to bring the total row width in bytes up to a multiple of 4/8/16/32/64/etc. I recommend looking at the actual bytes of the bitmap in hex first to get a sense of what is going on. It's a great learning exercise and will give you high confidence in your pixel-walking code, which is what you want. You might be able to use a debugger to do this, or else write a simple loop with printf over the image bytes.