#include <GL/glut.h>

void reshape(int w, int h){
    glViewport(0, 0, w, h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(-w/2, w - w/2, -h/2, h - h/2);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

void display(){
    glClear(GL_COLOR_BUFFER_BIT);
    glColor3f(1, 1, 1);
    glPushAttrib(GL_ALL_ATTRIB_BITS);
    glColor3f(1, 0, 0);
    glPopAttrib();
    glRecti(0, 0, 10, 10); // draws white rect
    // Commenting the line makes next rect white
    // Uncommenting the line makes next rect red
    glTranslatef(0, 0, 0);
    glPushAttrib(GL_ALL_ATTRIB_BITS);
    glColor3f(1, 0, 0);
    glPopAttrib();
    glRecti(20, 20, 30, 30); // draws white or red rect
    glutSwapBuffers();
}

int main (int argc, char * argv[]){
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE|GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("OpenGL lesson 1");
    glutReshapeFunc(reshape);
    glutDisplayFunc(display);
    glutMainLoop();
    return 0;
}
The code above is a complete, compilable program that renders two rectangles. The program produces different results depending on whether the line "glTranslatef(0, 0, 0);" is commented out or not. Is that a bug, or a misuse of OpenGL?
Is that a bug, or a misuse of OpenGL?
That is just a bug. The spec clearly states that glColor sets the current RGBA color value, which becomes the vertex's color the next time a vertex is formed, i.e. at the next glVertex call inside a glBegin/glEnd block. glRect is specified to be equivalent to glBegin(); glVertex() [4x]; glEnd().
The current RGBA color value is part of the GL_CURRENT_BIT attribute group, and is of course included in GL_ALL_ATTRIB_BITS. glTranslate is only supposed to affect the top element of the currently selected matrix stack. The correct output for this code is two white rectangles, no matter whether the glTranslate is there or not.
However, all this stuff is horribly outdated and has been deprecated since 2008.
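To make the expected behaviour concrete, here is a minimal illustrative snippet (not part of the original program): because glPopAttrib restores the saved current color, the red set between the push and the pop is discarded, so a red rectangle only appears if the color is set after the pop.
glColor3f(1, 1, 1);            // current color: white
glPushAttrib(GL_CURRENT_BIT);  // saves the current color (GL_CURRENT_BIT is part of GL_ALL_ATTRIB_BITS)
glColor3f(1, 0, 0);            // current color: red, but only until...
glPopAttrib();                 // ...the saved white color is restored here
glRecti(0, 0, 10, 10);         // white rectangle

glColor3f(1, 0, 0);            // set red *after* the pop
glRecti(20, 20, 30, 30);       // red rectangle, with or without an intervening glTranslatef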
Related
(Code snippet below. I know it's ugly, but I wanted to make it work before making it better, so please don't pay too much attention to the structure.)
I slightly modified the GLFW example from the documentation to get a triangle that rotates when the right arrow key is pressed and traces out a circle with the position of one of its vertices (the blue one in this case).
I clear GL_COLOR_BUFFER_BIT only when initializing the window, to avoid having to store all the coordinates needed to draw the line (there would be hundreds of thousands of them in the final program). That means that every time I press the right arrow, a "copy" of the triangle is drawn on screen rotated by 12 degrees, and a line is drawn connecting the old position of the blue vertex to the new one.
The problem now is that I would like to be able to press the escape key (GLFW_KEY_ESCAPE) and "delete" the triangles while keeping the lines that have been drawn.
I tried using the depth buffer to hide the triangles behind a black rectangle, but then only the last line drawn is visible (I think this is because OpenGL doesn't know the z of the previous lines, since I don't store them).
Is there a way to do what I want without having to store all the point coordinates and then clearing the whole screen and redrawing only the lines? If storing them is unavoidable, what would be the best way to do it?
Here is part of the code I have so far.
/* Note: Vertex, rotation, rad, DegToRad() and error_callback(), plus the usual
   GLFW/math headers, are defined in the part of the program not shown here. */
bool check = 0;
Vertex blue  = { 0.f,   0.6f, 0.5f};
Vertex green = { 0.6f, -0.4f, 0.5f};
Vertex red   = {-0.6f, -0.4f, 0.5f};
Vertex line  = { 0.f,   0.6f, 0.f};
Vertex line2 = { 0.f,   0.6f, 0.f};

static void
key_callback(GLFWwindow *window, int key, int scancode, int action, int mods) {
    if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
        check = !check;
    if (key == GLFW_KEY_RIGHT && action == GLFW_PRESS) {
        /* remember the previous end point of the traced line */
        line.x = line2.x;
        line.y = line2.y;
        rotation -= 12;
        rad = DegToRad(-12);
        /* rotate the tracked (blue) vertex by -12 degrees around the origin */
        double x = line.x * cos(rad) - line.y * sin(rad);
        double y = line.y * cos(rad) + line.x * sin(rad);
        line2.x = x;
        line2.y = y;
    }
}
int main(void) {
    GLFWwindow *window;
    glfwSetErrorCallback(error_callback);
    if (!glfwInit())
        exit(EXIT_FAILURE);
    window = glfwCreateWindow(1280, 720, "Example", NULL, NULL);
    if (!window) {
        glfwTerminate();
        exit(EXIT_FAILURE);
    }
    glfwMakeContextCurrent(window);
    glfwSetKeyCallback(window, key_callback);
    /* the color buffer is intentionally cleared only once, before the render loop */
    glClear(GL_COLOR_BUFFER_BIT);
    while (!glfwWindowShouldClose(window)) {
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
        float ratio;
        int width, height;
        glfwGetFramebufferSize(window, &width, &height);
        ratio = width / (float) height;
        glViewport(0, 0, width, height);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(-ratio, ratio, -1.f, 1.f, 1.f, -1.f);
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        glRotatef(rotation, 0.f, 0.f, 1.f);
        /* the rotating triangle */
        glBegin(GL_TRIANGLES);
        glColor3f(1.f, 0.f, 0.f);
        glVertex3f(red.x, red.y, red.z);
        glColor3f(0.f, 1.f, 0.f);
        glVertex3f(green.x, green.y, green.z);
        glColor3f(0.f, 0.f, 1.f);
        glVertex3f(blue.x, blue.y, blue.z);
        glEnd();
        glLoadIdentity();
        /* the line segment traced by the blue vertex */
        glLineWidth(1.0);
        glColor3f(1.0, 0.0, 0.0);
        glBegin(GL_LINES);
        glVertex3f(line.x, line.y, line.z);
        glVertex3f(line2.x, line2.y, line2.z);
        glEnd();
        if (check) {
            //hide the triangles but not the lines
        }
        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    glfwDestroyWindow(window);
    glfwTerminate();
    exit(EXIT_SUCCESS);
}
I clear the GL_COLOR_BUFFER_BIT only when initializing the window
That's your problem right there. It's idiomatic in OpenGL to always start with a clear of the main framebuffer's color bits, because you don't know the state of your window's main framebuffer when the operating system asks for a redraw. For all you know, it could have been replaced with cat pictures in the background without your program knowing it. Seriously: if you have a cat video running and the OS feels the need to rearrange your window's main framebuffer memory, that is what you might end up with.
Is there a way to do what I want without having to store all the point coordinates and then clearing the whole screen and redrawing only the lines?
For all intents and purposes: No. In theory one could come up with a contraption made out of a convoluted series of stencil buffer operations to implement that, but this would be barking up a very wrong tree.
Here's something for you to try out: draw a bunch of triangles like you do, then resize your window down so that nothing remains, then resize it back to its original size… do you see the problem? There's a way to address this particular problem, but it's not what you should do here.
The correct thing to do is to redraw everything. If you feel that that's too slow, you have to optimize your drawing process. On current-generation hardware it's possible to churn out on the order of 100 million triangles per second.
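In practice that means clearing at the top of every frame, keeping the traced points in a growable array, and redrawing both the triangle and the accumulated trace on every iteration. Here is a minimal sketch of that approach for the code above; the trace array and trace_push() helper are my own illustrative names, not part of the original program.
/* Points traced by the blue vertex; the array grows as needed. */
static Vertex *trace = NULL;
static size_t trace_count = 0, trace_capacity = 0;

static void trace_push(Vertex v) {
    if (trace_count == trace_capacity) {
        trace_capacity = trace_capacity ? trace_capacity * 2 : 1024;
        trace = realloc(trace, trace_capacity * sizeof *trace);  /* error check omitted in this sketch */
    }
    trace[trace_count++] = v;
}

/* Inside the render loop: */
glClear(GL_COLOR_BUFFER_BIT);              /* clear every frame */

if (!check) {                              /* ESC toggles the triangles off */
    /* ...draw the rotating triangle exactly as before... */
}

glLoadIdentity();
glColor3f(1.f, 0.f, 0.f);
glBegin(GL_LINE_STRIP);                    /* redraw the whole accumulated trace */
for (size_t i = 0; i < trace_count; ++i)
    glVertex3f(trace[i].x, trace[i].y, trace[i].z);
glEnd();
The key callback would call trace_push(line2) whenever a new point is produced; even a few hundred thousand vertices per frame is well within the throughput mentioned above.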
I have the following cairo code:
cairo_set_source_rgba(cr, 1, 1, 1, 1);
cairo_rectangle(cr, 0, 0, WINDOW_SIZE, WINDOW_SIZE);
cairo_fill(cr);
cairo_scale(cr, 8, 8);
draw_image(cr, "q.png", 5, 5);
And
void draw_image(cairo_t* cr, char* img_name, int x, int y)
{
    cairo_translate(cr, x, y);
    cairo_surface_t* img = cairo_image_surface_create_from_png(img_name);
    cairo_set_source_surface(cr, img, 0, 0);
    cairo_paint(cr);
    cairo_translate(cr, -x, -y);
}
q.png is a 5x5 image:
But when the program is run, the image is slightly blurred:
I have already tried
cairo_set_antialias(cr, CAIRO_ANTIALIAS_NONE);
but it does not work.
Is there any way to fix this problem?
This is because of how the image is scaled up. Instead of setting a source surface directly, create a pattern out of the surface with cairo_pattern_create_for_surface(), call cairo_pattern_set_filter() on it to set the scaling mode, and then call cairo_set_source() to load the pattern. See the documentation for cairo_filter_t for the scaling modes. CAIRO_FILTER_NEAREST, for example, will give you a normal pixel zoom with no blurring or other transformations.
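Applied to the draw_image() function from the question, that might look like the following sketch (the pattern calls are the standard cairo API; the cleanup calls are additions of mine):
void draw_image(cairo_t* cr, char* img_name, int x, int y)
{
    cairo_translate(cr, x, y);
    cairo_surface_t* img = cairo_image_surface_create_from_png(img_name);

    /* Wrap the surface in a pattern so the scaling filter can be chosen. */
    cairo_pattern_t* pat = cairo_pattern_create_for_surface(img);
    cairo_pattern_set_filter(pat, CAIRO_FILTER_NEAREST);  /* plain pixel zoom, no smoothing */
    cairo_set_source(cr, pat);
    cairo_paint(cr);

    cairo_pattern_destroy(pat);
    cairo_surface_destroy(img);
    cairo_translate(cr, -x, -y);
}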
I've written a small tiling game engine with OpenGL and C, and I can't seem to figure out what the problem is. My main loop looks like this:
void main_game_loop()
{
    (poll for events and respond to them)
    glClear(GL_COLOR_BUFFER_BIT);
    glPushMatrix();
    draw_block(WALL, 10, 10);
}
draw_block:
void draw_block(block b, int x, int y)
{
    (load b's texture from a hash and store it in GLuint tex)
    glPushMatrix();
    glTranslatef(x, y, 0);
    glBindTexture(GL_TEXTURE_2D, tex);
    glBegin(GL_QUADS);
    //BLOCK_DIM is 32, the width and height of the texture
    glTexCoord2i(0, 0); glVertex3f(0, 0, 0);
    glTexCoord2i(1, 0); glVertex3f(BLOCK_DIM, 0, 0);
    glTexCoord2i(1, 1); glVertex3f(BLOCK_DIM, BLOCK_DIM, 0);
    glTexCoord2i(0, 1); glVertex3f(0, BLOCK_DIM, 0);
    glEnd();
    glPopMatrix();
}
initialization function: (called before main_game_loop)
void init_gl()
{
    glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0, SCREEN_WIDTH, SCREEN_HEIGHT, 0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glClearColor(0, 0, 0, 0);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glDisable(GL_DEPTH_TEST);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
}
When run, this displays a black screen. However, if I remove the glViewport call, it seemingly displays the texture, but huge and in the corner of the window. Screenshot:
The texture IS being drawn correctly, because if I scale out by a huge factor, I can see the entire image. The y-axis also seems to be flipped relative to what I used in the gluOrtho2D call (discovered by making events add or subtract from the x/y coordinates of the image; subtracting from the y coordinate causes the image to move downward). I'm starting to get frustrated, because this is the simplest possible example I can think of. I'm using SDL, and am passing SDL_OPENGL to SDL_SetVideoMode. What is going on here?
Looks like a problem with glViewport, but just to be sure, did you try clearing the color buffer to purple?
I've always thought of glViewport as a video/windowing function rather than part of OpenGL proper, because it is the intermediary between the window manager and the OpenGL subsystem and it uses window coordinates. As such, you should probably look at it along with the other SDL video calls. I suggest updating the question with the full code, or at least with the parts relevant to the video/window subsystem.
Or is it that you omitted to call glViewport after a resize?
You should also try your code without SDL_FULLSCREEN and/or with a smaller window. I usually start with a 512x512 or 640x480 window until I get the viewport and some basic controls right.
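For the resize case: the viewport has to be re-specified whenever the window size changes. With SDL 1.2 (which is what SDL_SetVideoMode implies) that might look roughly like the sketch below; the event-loop structure is assumed, not taken from the posted code.
/* Inside the event-polling loop: keep the viewport in sync with the window. */
SDL_Event event;
while (SDL_PollEvent(&event)) {
    if (event.type == SDL_VIDEORESIZE) {
        /* SDL 1.2 requires setting the video mode again after a resize. */
        SDL_SetVideoMode(event.resize.w, event.resize.h, 0, SDL_OPENGL | SDL_RESIZABLE);
        glViewport(0, 0, event.resize.w, event.resize.h);
    }
}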
The first two parameters of glViewport specify the lower-left corner of the viewport:
http://www.opengl.org/sdk/docs/man/xhtml/glViewport.xml
You can try
glViewport(0, SCREEN_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT);
For gluOrtho2D, the parameters are left, right, bottom, top,
so I would probably use
gluOrtho2D(0, SCREEN_WIDTH, 0, SCREEN_HEIGHT);
In this code I try to draw a cube. I try to draw the vertices of all the faces anticlockwise.
The problem is that if I don't rotate the cube, only the red face is drawn; if instead I rotate it by 5 degrees, I only see part of the cube.
#import <OpenGL/OpenGL.h>
#import <GLUT/GLUT.h>

int width=500, height=500, depth=500;

void init()
{
    glEnable(GL_DEPTH_TEST);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(200, 200,-200, 200, 200, 0, 0, 1, 0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0,width,0,height);
    gluPerspective(90, 1, -100, 100);
    glViewport(0, 0, width, height);
}
void drawCube()
{
    int vertices[8][3]= { {100,100,0} , {300,100,0}, {300,300,0}, {100,300,0}, {100,100,300} , {300,100,300}, {300,300,300}, {100,300,300} };
    glBegin(GL_QUADS);
    glColor4f(1, 0, 0, 0);
    glVertex3iv(vertices[0]);
    glVertex3iv(vertices[1]);
    glVertex3iv(vertices[2]);
    glVertex3iv(vertices[3]);
    glVertex3iv(vertices[4]);
    glVertex3iv(vertices[5]);
    glVertex3iv(vertices[6]);
    glVertex3iv(vertices[7]);
    glColor4f(0, 1, 0, 0);
    glVertex3iv(vertices[1]);
    glVertex3iv(vertices[5]);
    glVertex3iv(vertices[6]);
    glVertex3iv(vertices[4]);
    glVertex3iv(vertices[0]);
    glVertex3iv(vertices[4]);
    glVertex3iv(vertices[7]);
    glVertex3iv(vertices[3]);
    glColor4f(0, 0, 1, 0);
    glVertex3iv(vertices[3]);
    glVertex3iv(vertices[2]);
    glVertex3iv(vertices[6]);
    glVertex3iv(vertices[7]);
    glVertex3iv(vertices[0]);
    glVertex3iv(vertices[1]);
    glVertex3iv(vertices[5]);
    glVertex3iv(vertices[4]);
    glEnd();
}
void display()
{
    glClearColor(0.0, 0.0, 0.0, 0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(200,200,150);
    glRotatef(5, 0, 1, 0);
    glTranslatef(-200,-200,-150);
    drawCube();
    glutSwapBuffers();
}

void idle(void)
{
}
int main(int argc, char * argv[])
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowPosition(100, 100);
    glutInitWindowSize(width, height);
    glutCreateWindow("Test");
    glutDisplayFunc(display);
    glutIdleFunc(idle);
    init();
    glutMainLoop();
    return 0;
}
This is what I see:
But I should see a rotated cube, so I should see part of the other face on the right. My doubt is whether I'm going wrong with drawing the vertices in anticlockwise order, or whether it's something else.
PS: the code is outdated because at my university I don't have the option of studying the newest version of OpenGL, and I must use GLUT.
A couple of problems:
Your projection matrix setup doesn't make sense.
Firstly, you should decide if you want an orthographic, or a perspective projection.
If you want orthographic, use gluOrtho2D. If you want a perspective projection, use gluPerspective. Using both will generate a bizarre transformation that's certainly not what you want.
gluPerspective can't have a negative near plane. The near plane should be greater than zero, perhaps something small like 1, with a far plane defining how far away from the camera you want the back clip plane to be. Since you seem to be using units in the hundreds, I might recommend a back plane of 1000 or so.
You're calling gluLookAt, but erasing the view matrix by calling glLoadIdentity in display(). If you want a view matrix, don't erase it after you program it.
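Putting those points together, the setup might look something like this sketch (the numbers mirror the answer's suggestions and the poster's coordinates; it is meant as an illustration, not a drop-in fix):
void init()
{
    glEnable(GL_DEPTH_TEST);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    /* One projection only, with a positive near plane. */
    gluPerspective(90, 1, 1, 1000);

    glViewport(0, 0, width, height);
}

void display()
{
    glClearColor(0.0, 0.0, 0.0, 0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    /* Program the view matrix here, after the identity load, so it isn't erased. */
    gluLookAt(200, 200, -200, 200, 200, 0, 0, 1, 0);

    glTranslatef(200, 200, 150);
    glRotatef(5, 0, 1, 0);
    glTranslatef(-200, -200, -150);
    drawCube();
    glutSwapBuffers();
}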
I am trying to get my VBO to draw, but I can't see anything. I am attempting to draw a single triangle (seems to me that a single triangle is a good start in the right direction). Everything compiles and runs without breaking.
void initGraphics(int width, int height) {
    glViewport(0, 0, width, height);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    glDisable(GL_DEPTH_TEST);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glClearColor(1.0, 1.0, 1.0, 1.0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0.0, width, height, 0.0);
    glMatrixMode(GL_MODELVIEW);
}
GLuint vboId;

void initVbo(void) {
    GLsizei dataSize;
    GLfloat* vertices;
    int vCount = 3;
    dataSize = sizeof(GLfloat) * 3 * vCount;
    vertices = (GLfloat*)malloc(dataSize);
    vertices[0] = 0;
    vertices[1] = 0;
    vertices[2] = 0;
    vertices[3] = 100;
    vertices[4] = 0;
    vertices[5] = 0;
    vertices[6] = 100;
    vertices[7] = 100;
    vertices[8] = 0;
    glewInit();
    glGenBuffersARB(1, &vboId);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vboId);
    glBufferDataARB(GL_ARRAY_BUFFER_ARB, dataSize, vertices, GL_STATIC_DRAW_ARB);
    free(vertices);
    //glDeleteBuffersARB(1, &vboId); // edit #1
}
unsigned int indices[] = { 0, 1, 2 }; // edit #3
void drawVbo(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    glClearColor(1.0, 1.0, 1.0, 1.0);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vboId);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, 0);
    //edit #1, #2
    glDrawElements(GL_TRIANGLES, 3 /*1*/, /*GL_UNSIGNED_BYTE*/ GL_UNSIGNED_INT, &indices);
    glDisableClientState(GL_VERTEX_ARRAY);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
}
These are the functions that are being called. I am running on Windows and I am avoiding posting a bunch of Windows-related code...
EDIT #1
Modified the code to reflect genpfault's suggestions.
EDIT #2
Modified the code to reflect Nicol Bolas' suggestions.
COMMENT #1
The following code works (just to prove the projection is set up correctly):
glBegin(GL_TRIANGLES);
glVertex3f(0.0, 0.0, 0.0);
glVertex3f(100.0, 0.0, 0.0);
glVertex3f(100.0, 100.0, 0.0);
glEnd();
EDIT #3
Modified the code to reflect Nicol Bolas' suggestions.
UPDATE #1
The modified code now works, although I am curious how to get glDrawArrays to work properly... my implementation looked like:
glDrawArrays(GL_TRIANGLES, 0, 0);
This doesn't seem right to me, but the spec says:
mode: Specifies what kind of primitives to render. Symbolic constants GL_POINTS, GL_LINE_STRIP, GL_LINE_LOOP, GL_LINES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN, GL_TRIANGLES, GL_QUAD_STRIP, GL_QUADS, and GL_POLYGON are accepted.
first: Specifies the starting index in the enabled arrays.
count: Specifies the number of indices to be rendered.
Based on what Nicol Bolas was saying, since I shouldn't need indices, 0, 0 make sense as arguments. Right?
glGenBuffersARB(1, &vboId);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vboId);
glBufferDataARB(GL_ARRAY_BUFFER_ARB, dataSize, vertices, GL_STATIC_DRAW_ARB);
free(vertices);
glDeleteBuffersARB(1, &vboId); // wat
After the glDeleteBuffersARB() call the pointed-to VBO ID(s) are invalidated and can't be used in glBindBufferARB().
Also:
glDrawElements(GL_TRIANGLES, 1, GL_UNSIGNED_BYTE, &indices);
You've defined indices as an unsigned int, so you should use GL_UNSIGNED_INT instead of GL_UNSIGNED_BYTE.
glDrawElements(GL_TRIANGLES, 1, GL_UNSIGNED_INT, &indices);
Triangles, as you may be aware, are made of three vertices. You are sending one (the second parameter). You can't draw a triangle from one position.
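Putting both fixes together, a sketch of the corrected call (and, for the glDrawArrays variant asked about in the update, the count is likewise the number of vertices to render, so 3 rather than 0):
/* Indexed draw: 3 indices of type GL_UNSIGNED_INT, matching unsigned int indices[]. */
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, indices);

/* Non-indexed alternative: start at vertex 0 and render 3 vertices. */
glDrawArrays(GL_TRIANGLES, 0, 3);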