Have to load the view matrix twice. Don't know why (C)

This is the weirdest bug I have ever encountered. My triangle will not be drawn unless I load the view matrix twice. What is going on?
If I remove one of the gfxSetCamera(&camera->t); calls, nothing is drawn.
void shipDraw() {
    glPushMatrix();
    glMultMatrixf(ship->t.m);
    glBegin(GL_TRIANGLES);
    glColor3f(0, 0, 1);
    glVertex3f(0, 0, -1);
    glVertex3f(0, 1, 1);
    glVertex3f(0, -1, 1);
    glEnd();
    glPopMatrix();
}
// Draw loop
while (!gfxUserQuit()) {
    entUpdateAll();
    gfxPrepare(1, 1);
    gfxSetCamera(&camera->t); // Have to call this twice!?!?
    gfxSetCamera(&camera->t);
    entDrawAll();
    gfxPresent();
}
// Graphics code
void gfxPrepare(int clearColor, int clearStencil) {
    // TODO: parse args
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
}
void gfxPresent() {
    glfwSwapBuffers();
}
void gfxSetCamera(transform *t) {
    float *m = t->m;
    // Build the inverse of the camera's rigid-body transform:
    // the rotation part is transposed, and the translation is the
    // camera position rotated into view space and negated.
    float viewmatrix[16] = {
        m[0], m[4], m[8],  0,
        m[1], m[5], m[9],  0,
        m[2], m[6], m[10], 0,
        -(m[0]*m[12] + m[1]*m[13] + m[2]*m[14]),
        -(m[4]*m[12] + m[5]*m[13] + m[6]*m[14]),
        -(m[8]*m[12] + m[9]*m[13] + m[10]*m[14]), 1};
    glLoadMatrixf(viewmatrix);
}
EDIT: I have reduced the error down to the following:
while (!gfxUserQuit()) {
    entUpdateAll();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    float *m = camera->t.m;
    float viewmatrix[16] = {
        m[0], m[4], m[8],  0,
        m[1], m[5], m[9],  0,
        m[2], m[6], m[10], 0,
        -(m[0]*m[12] + m[1]*m[13] + m[2]*m[14]),
        -(m[4]*m[12] + m[5]*m[13] + m[6]*m[14]),
        -(m[8]*m[12] + m[9]*m[13] + m[10]*m[14]), 1};
    float viewmatrix2[16] = {
        m[0], m[4], m[8],  0,
        m[1], m[5], m[9],  0,
        m[2], m[6], m[10], 0,
        -(m[0]*m[12] + m[1]*m[13] + m[2]*m[14]),
        -(m[4]*m[12] + m[5]*m[13] + m[6]*m[14]),
        -(m[8]*m[12] + m[9]*m[13] + m[10]*m[14]), 1};
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glLoadMatrixf(viewmatrix2);
    entDrawAll();
    glfwSwapBuffers();
}
If I remove the definition and initialization of 'float viewmatrix', nothing is drawn to the screen. If I restore it, it renders as normal. Is this some memory corruption or something?
EDIT2: They (viewmatrix and viewmatrix2) are different.
How can I debug this?
EDIT3: I was calling glfwGetTime() without including the header that declares it. This is what was breaking it :O
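(For context: in C, calling a function with no prototype in scope makes the compiler assume it returns int, while glfwGetTime() actually returns a double, so each call is undefined behavior; in practice the mismatched calling convention can produce exactly this kind of seemingly unrelated breakage. A minimal sketch of the fix, assuming the GLFW 2-style API this code appears to use:)
#include <GL/glfw.h> // declares double glfwGetTime(void); use <GLFW/glfw3.h> for GLFW 3

double t = glfwGetTime(); // the call now has the correct return type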

What if, from your second example, you replaced "float viewmatrix...;" with "char tmpbuffer[16 * sizeof(float)];" and then (assuming that did nothing) added "memset(tmpbuffer, 0, sizeof(tmpbuffer));"?
It sounds like something is corrupting your stack and that the first viewmatrix definition "fixes" it for the second one. What I'm suggesting should verify that the problem is something along these lines.

Related

Why is 3D projection in OpenGL working, but leaving a trail behind?

I just did the math from Wikipedia for 3D projection because I noticed the formulas were simple; no library needed. It does work, but the cube leaves a trail behind as it moves. Note that the cube doesn't actually move; I am actually changing the camera position, which makes the cube look like it's moving.
There's no need to point out the 100 bad practices I'm committing; I'm aware. This is just supposed to be a quick test.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <glad/glad.h>
#include <SDL2/SDL.h>
#include <SDL2/SDL_image.h>
#include <SDL2/SDL_opengl.h>
#include <math.h>
#include "utils.h"
#include "keys.h"
char p = 1;

typedef struct Vec3 {
    float x;
    float y;
    float z;
} Vec3;

// Writes 1s along the diagonal of an s-by-s matrix.
// Only the diagonal is touched; the rest is assumed already zeroed.
void Mat_iden(float *m, Uint32 s) {
    Uint32 i = 0;
    Uint32 unt = s + 1;
    while (i < s) {
        m[unt * i] = 1;
        i++;
    }
}

float one[3][3];
float two[3][3];
float three[3][3];
int main() {
    SDL_Init(SDL_INIT_VIDEO);
    IMG_Init(IMG_INIT_PNG);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 5);
    SDL_Window *w = SDL_CreateWindow("Snapdoop", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 500, 500, SDL_WINDOW_OPENGL);
    SDL_GLContext c = SDL_GL_CreateContext(w);
    gladLoadGLLoader((GLADloadproc)SDL_GL_GetProcAddress);
    Mat_iden(one[0], 3);
    Mat_iden(two[0], 3);
    Mat_iden(three[0], 3);
    Shader s[2];
    s[0] = Shade("/home/shambhav/eclipse-workspace/Snadoop/src/vs.glsl");
    s[1] = Shade("/home/shambhav/eclipse-workspace/Snadoop/src/fs.glsl");
    Shade_comp(&s[0], GL_VERTEX_SHADER);
    Shade_comp(&s[1], GL_FRAGMENT_SHADER);
    Program sp;
    Prog_attach(&sp, s, 2);
    printf("VS: %s\n", s[0].info);
    printf("FS: %s\n", s[1].info);
    printf("SP: %s\n", sp.info);
    glDeleteShader(s[0].c);
    glDeleteShader(s[1].c);
    float v[48] = {
        // Front
        0.25, 0.25, 0.25, 1.0, 1.0, 0.0,
        -0.25, 0.25, 0.25, 1.0, 0.0, 0.0,
        -0.25, -0.25, 0.25, 0.0, 1.0, 1.0,
        0.25, -0.25, 0.25, 0.0, 1.0, 0.0,
        // Back
        0.25, 0.25, -0.25, 0.0, 0.0, 1.0,
        -0.25, 0.25, -0.25, 1.0, 0.0, 1.0,
        -0.25, -0.25, -0.25, 1.0, 1.0, 1.0,
        0.25, -0.25, -0.25, 0.0, 0.0, 0.0
    };
    unsigned int i[36] = {
        // Front
        0, 1, 2,
        2, 3, 0,
        // Right
        0, 3, 7,
        7, 4, 0,
        // Left
        1, 2, 6,
        6, 5, 2,
        // Back
        4, 5, 6,
        6, 7, 4,
        // Up
        0, 1, 5,
        5, 4, 0,
        // Down
        3, 7, 2,
        2, 6, 7
    };
    GLuint VAO, VBO, EBO;
    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glGenBuffers(1, &EBO);
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(v), v, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(i), i, GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, (void *)0);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, (void *)(sizeof(float) * 3));
    glEnableVertexAttribArray(1);
    Vec3 cam = {1.0, 1.0, 1.0};
    Vec3 theta = {0, 0, 0};
    Key k = (const Key){ 0 };
    printf("%d\n", k.alpha[9]);
    SDL_Event e;
    while (p) {
        while (SDL_PollEvent(&e)) {
            switch (e.type) {
            case SDL_QUIT:
                p = 0;
                break;
            case SDL_KEYDOWN:
                *key(&k, e.key.keysym.sym) = 1;
                break;
            case SDL_KEYUP:
                *key(&k, e.key.keysym.sym) = 0;
                break;
            }
        }
        if (*key(&k, SDLK_RIGHT)) {
            cam.x += 0.01;
        }
        if (*key(&k, SDLK_LEFT)) {
            cam.x -= 0.01;
        }
        if (*key(&k, SDLK_UP)) {
            cam.y += 0.01;
        }
        if (*key(&k, SDLK_DOWN)) {
            cam.y -= 0.01;
        }
        if (*key(&k, 'w')) {
            theta.y += 0.01;
        }
        if (*key(&k, 's')) {
            theta.y -= 0.01;
        }
        if (*key(&k, 'a')) {
            theta.x -= 0.01;
        }
        if (*key(&k, 'd')) {
            theta.x += 0.01;
        }
        if (*key(&k, 'z')) {
            theta.z -= 0.01;
        }
        if (*key(&k, 'x')) {
            theta.z += 0.01;
        }
        if (*key(&k, 'n')) {
            cam.z += 0.01;
        }
        if (*key(&k, 'm')) {
            cam.z -= 0.01;
        }
        one[1][1] = cos(theta.x);
        one[1][2] = sin(theta.x);
        one[2][1] = -sin(theta.x);
        one[2][2] = cos(theta.x);
        two[0][0] = cos(theta.y);
        two[0][2] = -sin(theta.y);
        two[2][0] = sin(theta.y);
        two[2][2] = cos(theta.y);
        three[0][0] = cos(theta.z);
        three[0][1] = sin(theta.z);
        three[1][0] = -sin(theta.z);
        three[1][1] = cos(theta.z);
        glUseProgram(sp.p);
        glUniformMatrix3fv(2, 1, GL_FALSE, one[0]);
        glUniformMatrix3fv(3, 1, GL_FALSE, two[0]);
        glUniformMatrix3fv(4, 1, GL_FALSE, three[0]);
        glUniform3f(5, cam.x, cam.y, cam.z);
        glClear(GL_DEPTH_BUFFER_BIT);
        glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);
        SDL_GL_SwapWindow(w);
    }
    glDeleteProgram(sp.p);
    glDeleteVertexArrays(1, &VAO);
    glDeleteBuffers(1, &VBO);
    glDeleteBuffers(1, &EBO);
    SDL_GL_DeleteContext(c);
    SDL_DestroyWindow(w);
    SDL_Quit();
    return 0;
}
Vertex Shader (vs.glsl):
#version 450 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec3 tcol;
layout (location = 2) uniform mat3 x;
layout (location = 3) uniform mat3 y;
layout (location = 4) uniform mat3 z;
layout (location = 5) uniform vec3 c;
out vec3 col;
void main() {
    vec3 d = x * y * z * (pos - c);
    gl_Position.x = d.x / d.z;
    gl_Position.y = d.y / d.z;
    gl_Position.z = 0.0;
    gl_Position.w = 1.0;
    col = tcol;
}
Fragment Shader:
#version 450 core
out vec4 color;
in vec3 col;
void main() {
    color = vec4(col, 1.0);
}
I think that keys.h and utils.h should not be here, as they are not relevant to OpenGL. And this is still a minimal reproducible example, as the only extra parts (keys.h and utils.h) are required for managing key data and loading shaders, respectively.
Some keys in my code may be inverted; it's just bad code in all ways... sorry for that.
This is an image I took after moving the cube (or the camera, to be accurate). One major thing to note is that it seems to be working perfectly other than the trail.
You need to clear the color buffer as well:
glClear(GL_DEPTH_BUFFER_BIT); // before
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // after
glClear clears the specified buffers. The buffers are specified with a bit mask. GL_COLOR_BUFFER_BIT indicates to clear the buffers currently enabled for color writing.
The short answer is that you need to change:
glClear(GL_DEPTH_BUFFER_BIT);
...to...
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
More details
Comments have asked for more elaboration, so I'll elaborate.
When you say glClear(GL_DEPTH_BUFFER_BIT) it clears out the pixel values in the Z-Buffer (depth buffer). When you say glClear(GL_COLOR_BUFFER_BIT) it clears out the RGBA channels of the pixels in the color buffer (sets them to the glClearColor). If you say glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT) it clears both the Z-Buffer and the color buffer at the same time. That's what you want to do. You want to start each frame with a fresh black background and draw your content for that frame over top of it.
Think of it like setting each pixel to black and resetting its depth value. More precisely, it sets the color buffer to the color specified by glClearColor and sets the depth value to the value specified by glClearDepth (1.0, the far plane, by default).
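For illustration, a typical per-frame setup looks like this (the clear values shown are the GL defaults):
glClearColor(0.0f, 0.0f, 0.0f, 0.0f); // color the buffer is cleared to
glClearDepth(1.0);                    // depth the buffer is cleared to (the far plane)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // perform both clears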
In your comment you said that you thought it "clears the functionality". That's not what glClear does. If you wanted to enable or disable writing to the depth buffer completely, you could do so with glDepthMask. This function lets you completely disable writes to the depth buffer, potentially while still writing color values to the color buffer. There is a similar function called glColorMask that lets you select which channels of the color buffer (red, green, blue, and/or alpha) you want to write to as well. In this way you could do interesting things like rendering only green, or even a special effect where one render pass writes only depth values and no color values (perhaps in preparation for an effect applied in a subsequent pass). glClear, conversely, actually sets the values in the pixels of the color buffer or depth buffer.
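For example, a sketch of those mask calls:
glDepthMask(GL_FALSE); // stop writing to the depth buffer
glColorMask(GL_FALSE, GL_TRUE, GL_FALSE, GL_FALSE); // write only the green channel
// A depth-only pre-pass is the reverse: depth on, color fully masked off
glDepthMask(GL_TRUE);
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);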
In the code you posted, you're only doing glClear(GL_DEPTH_BUFFER_BIT), which is only clearing the depth buffer, but not the color buffer. This essentially leaves all the paint on the canvas from last frame you drew, so leftover images from previous frames remain visible on the screen. You should be clearing both buffers.
Because you only draw your colorful square each frame, you draw a new square over top of whatever was in the buffer from last time. If you're double-buffering (the default for an SDL OpenGL window), you may find that you're drawing over top of a frame from two frames ago, which may produce a strobing/flashing marquee effect.
The argument to glClear is called a bitmask. It uses each bit of the mask like a checkbox to select whether a particular kind of buffer should be cleared. Specifying GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT will logically OR the bits together creating a number with both bits set -- which is like checking both checkboxes, saying, "yes, please clear the depth buffer, and yes also clear the color buffer".
There can be up to four different kinds of buffers, not just color and depth. The four mask fields are GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_ACCUM_BUFFER_BIT, and GL_STENCIL_BUFFER_BIT. Each one of these is a bit-field value, a number with a single binary bit set, which can be logically OR'ed together like 4 individual checkboxes. In your application your render target may not have an accumulator buffer or a stencil buffer. Some render targets don't even make use of a depth buffer. It's totally up to how you created your render buffer originally. In your case it looks like you have a buffer with color and depth. So when it comes time to clear the buffer, in preparation for rendering the frame, you'll want to make sure you check both boxes, effectively asking for both the color and depth components of your buffers to be cleared. Do so by passing GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT as the argument to glClear.
The use of bit-fields here is so exemplary, that glClear is actually used on the Wikipedia page for Mask_(computing) - Uses of bitmasks to explain how bitmasks can be used!

OpenGL ARToolkit - Calling a draw function based of KeyEvent

I have two draw functions - "drawComponents" and "drawSignals" which look as follows:
static void drawComponents(ARdouble trans1[3][4], ARdouble trans2[3][4], int r, int g, int b)
{
    for (int i = 0; i < numComponents; i++) {
        glPushMatrix(); // Save world coordinate system.
        glTranslatef(0.0f, 0.0f, 0.0f);
        glEnableClientState(GL_VERTEX_ARRAY);
        glVertexPointer(3, GL_FLOAT, 0, &(components[i].combCoords[0]));
        glColor4ub(r, g, b, 0);
        glDrawElements(GL_LINES, components[i].nIndeces, GL_UNSIGNED_INT, &(components[i].combIndeces[0]));
        glDisableClientState(GL_VERTEX_ARRAY);
        glPopMatrix(); // Restore world coordinate system.
    }
}
static void drawSignals(ARdouble trans1[3][4], ARdouble trans2[3][4], int r, int g, int b)
{
    for (int i = 0; i < numSignals; i++) {
        glPushMatrix(); // Save world coordinate system.
        glTranslatef(0.0f, 0.0f, 0.0f);
        glEnableClientState(GL_VERTEX_ARRAY);
        glVertexPointer(3, GL_FLOAT, 0, signal[i].combCoords);
        glColor4ub(r, g, b, 0);
        glDrawElements(GL_LINES, signal[i].nIndeces, GL_UNSIGNED_INT, &(signal[i].combIndeces[0]));
        glPopMatrix();
    }
}
These functions work fine if I just call them directly in the main loop. However, I want to call them based on user input: if the "1" key is pressed then drawComponents will be called, and if the "2" key is pressed then the drawSignals function will be called.
To do this I have the following keyEvent function:
static void keyEvent(unsigned char key, int x, int y)
{
    switch (key) {
    case '1':
        drawComponents(config->trans, config->marker[3].trans, 255, 0, 0);
        break;
    case '2':
        drawSignals(config->trans, config->marker[3].trans, 0, 0, 255);
        break;
    }
}
I thought this would work fine; however, with this function, nothing happens when I press the keys. Does anyone have any suggestions as to where I'm going wrong?
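(For what it's worth, the usual pattern in GLUT-style programs is to record the input in the key callback and leave all drawing to the display function, since GL calls made from an input callback are overwritten by the next frame or never presented. A minimal sketch, with a hypothetical drawMode flag:)
static int drawMode = 0; // hypothetical: 0 = nothing, 1 = components, 2 = signals

static void keyEvent(unsigned char key, int x, int y)
{
    switch (key) {
    case '1': drawMode = 1; break;
    case '2': drawMode = 2; break;
    }
}

// ...then in the display function / main loop:
if (drawMode == 1) drawComponents(config->trans, config->marker[3].trans, 255, 0, 0);
else if (drawMode == 2) drawSignals(config->trans, config->marker[3].trans, 0, 0, 255);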

OpenGL GL_POINTS result differs from input

I want to draw something with GL_POINTS, but after roughly a third of the total points, the result starts to differ from the input by 1 pixel.
I tried different glOrtho and glViewport arguments, but nothing changed.
My test program:
int w = atoi(argv[1]);
int h = atoi(argv[2]);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, w, h, 0, 1.0, -1.0);
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
unsigned int wf, hf;
unsigned char rgb[3];
while (!glfwWindowShouldClose(window)) {
    glClear(GL_COLOR_BUFFER_BIT);
    glPointSize(1);
    glBegin(GL_POINTS);
    for (hf = 0; hf < h; hf++) {
        for (wf = 0; wf < w; wf++) {
            memset(rgb, 0, 3);
            rgb[wf % 3] = 0xff;
            glColor3ub(rgb[0], rgb[1], rgb[2]);
            glVertex2f(wf, hf);
        }
    }
    glEnd();
    glfwSwapBuffers(window);
    glfwPollEvents();
}
Results:
[image: "Not Colored"]
[image: "Colored"]
Michael Roy's suggestion solved my problem; I just changed this line:
GLFWwindow* wmain = glfwCreateWindow(atoi(argv[1]), atoi(argv[2]), "test", 0, 0);
to
GLFWwindow* wmain = glfwCreateWindow(atoi(argv[1]) + 1, atoi(argv[2]) + 1, "test", 0, 0);
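An alternative that avoids resizing the window is to target pixel centers instead of pixel edges: with this glOrtho setup, pixel centers sit at half-integer coordinates, and integer coordinates land exactly on pixel boundaries, where rounding can go either way. A minimal sketch of that change:
glVertex2f(wf + 0.5f, hf + 0.5f); // hit the center of pixel (wf, hf)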

OpenGL glBegin and glPushMatrix

I'm trying to design a scene with 3 spheres and one horizontal line as an equator. I managed to draw the 3 spheres, but I don't know why the line is not drawn.
This is my code, in case you can see where I'm going wrong:
#include <GL/gl.h>
#include <GL/glut.h>

void render(void);
void reshape(int w, int h);

int angle = 90;

int main(int argc, char **argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowPosition(50, 50);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Planets");
    glutDisplayFunc(render);
    glutReshapeFunc(reshape);
    glutMainLoop();
    return 0;
}
void render(void) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glClearColor(0, 0, 0, 1);
    // Equator
    glBegin(GL_LINES);
    glColor3f(1, 1, 1);
    glLineWidth(1);
    glTranslated(0, 0, 0);
    glVertex2f(0, 2);
    glVertex2f(2, 2);
    glEnd();
    // Sun
    glPushMatrix();
    glLoadIdentity();
    glColor3f(1.0, 1.0, 0.0);
    glTranslated(0, 0, -2);
    glRotated(angle, 1, 0, 0);
    glutWireSphere(.3, 20, 20);
    glPopMatrix();
    // Earth
    glPushMatrix();
    glLoadIdentity();
    glColor3f(0.0, 0.0, 1.0);
    glTranslated(0.7, 0, -2);
    glRotated(angle, 1, 0, 0);
    glutWireSphere(.15, 20, 20);
    glPopMatrix();
    // Moon
    glPushMatrix();
    glLoadIdentity();
    glColor3f(1.0, 0.0, 1.0);
    glTranslated(1, 0, -2);
    glRotated(angle, 1, 0, 0);
    glutWireSphere(.05, 10, 10);
    glPopMatrix();
    glutSwapBuffers();
}
void reshape(int w, int h) {
    const double ar = (double) w / (double) h;
    glViewport(0, 0, (GLsizei) w, (GLsizei) h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glFrustum(-ar, ar, -1.0, 1.0, 2.0, 100.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}
You specify a frustum whose near clip plane sits at z = -2 (the near value of 2.0 passed to glFrustum is a distance in front of the eye, which looks down the negative z axis). Your intended line is drawn at z = 0, thus outside the projection volume, so it is clipped away and never rendered.
glTranslated(0, 0, 0) is a no-op, by the way; matrix calls are not allowed between glBegin and glEnd in any case.
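A minimal sketch of a fix along those lines, with coordinates chosen only for illustration: place the line beyond the near plane, next to the spheres.
// Equator, at z = -2 so it lies inside the viewing frustum
glBegin(GL_LINES);
glColor3f(1, 1, 1);
glVertex3f(-2, 0, -2);
glVertex3f(2, 0, -2);
glEnd();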

OpenGL-4: How to create a circle and circle of lines using VAOs

I have the following OpenGL 4 code (see below). I created a buffer for vertices and wanted to initialize it with a for loop in init().
It should be a circle of 30 lines (to be surrounded by a circle later), but I can see only the first line on the screen. I have done such things before with glVertex, but with VAOs I don't really know what to do. I tried a lot, but I'm really puzzled; maybe it's some stupid mistake or a misunderstanding on my part, but I failed to find it. Is it possible to do this with VAOs at all?
GLuint lineArrayID;
GLuint lineVertexBuffer;
GLuint numLines = 30;
static GLfloat lineVertexBufferData[30][2] = {};

void init() {
    draw_circle();
    glClearColor(1.0, 1.0, 1.0, 1.0); // background of the window
    GLfloat x, y;
    double angle;
    GLfloat radius = 5.0;
    angle = 2 * PI / 30;
    int j = 0;
    float i = 0;
    for (i = -PI; i < -PI; i += 0.15) {
        std::cout << " +" << std::endl;
        x = sin(i);
        y = cos(i);
        lineVertexBufferData[j][0] = 0.0; lineVertexBufferData[j][1] = 0.0;
        lineVertexBufferData[j][0] = x;   lineVertexBufferData[j][1] = y;
        j++;
    }
    // compile and activate the desired shader program
    shaderProgramID = loadShaders("D.vs", "D.fs");
    glUseProgram(shaderProgramID);
    // generate Buffers for our objects
    prepareLines();
}
void prepareLines() {
    glGenVertexArrays(1, &lineArrayID); // gen one array object
    glBindVertexArray(lineArrayID);     // binds it
    glGenBuffers(1, &lineVertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, lineVertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, numLines * 60 * sizeof(GLfloat), lineVertexBufferData, GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
    glEnableVertexAttribArray(0);
    glBindVertexArray(0);
}
static void display(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    // drawing the lines
    glBindVertexArray(lineArrayID);
    glCallList(1);
    glDrawArrays(GL_LINES, 0, numLines * 2);
    glBindVertexArray(0);
    transform();
    //glRotatef(grad, 0, 0, 1);
    //glutSwapBuffers();
    glFlush();
}
numLines * 60 * sizeof(GLfloat)
That is way too big, and doesn't match the size of lineVertexBufferData at all. It should be numLines * 2 * sizeof(GLfloat), or even just sizeof(lineVertexBufferData).
glCallList(1);
This is also invalid; you never create any display lists, so it's unlikely that display list 1 exists. If you're using VAOs, you shouldn't need display lists anyway.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
The second parameter means that each vertex has three components, but judging from lineVertexBufferData, it should only have two.
glDrawArrays(GL_LINES, 0, numLines * 2);
The third parameter is the number of vertices to render, not the number of components. This should not be multiplied by two.
//glutSwapBuffers();
glFlush();
SwapBuffers is correct here, not glFlush (which is almost never needed).
You're also calling transform() after you draw, when it should probably be before; otherwise your transformations will be delayed by a frame.
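Putting those points together, a sketch of the corrected calls, reusing the question's identifiers:
// Upload exactly the data the array holds
glBufferData(GL_ARRAY_BUFFER, sizeof(lineVertexBufferData), lineVertexBufferData, GL_STATIC_DRAW);
// Two components (x, y) per vertex
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
// 30 vertices make 15 separate GL_LINES segments; no glCallList needed
glDrawArrays(GL_LINES, 0, numLines);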
