I have a simple OpenGL program that creates a ground plane and randomly placed tree objects, and it has some lighting. There is an issue with the lighting that I am unable to solve. When the camera is at a certain x angle, the lighting seems not to affect the ground and it appears black. This is an instant change; it does not gradually darken. Most of the ground is lit seemingly fine, but at certain angles it immediately turns black. The code is very messy and much of its core comes from the OpenGL tutorial on Lighthouse 3D, because I am taking a class on OpenGL and the goal of the class is to take these tutorials and create something unique with them. Does anyone know how to fix this? Maybe the lighting is implemented incorrectly? The code below draws the ground plane.
// Draw ground
glColor3f(0.039f, 0.341f, 0.078f);
glNormal3f(1.0f, 1.0f, 1.0f);
glBegin(GL_QUADS);
glVertex3f(-1500.0f, -8.0f, -1500.0f);
glVertex3f(-1500.0f, -8.0f, 1500.0f);
glVertex3f( 1500.0f, -8.0f, 1500.0f);
glVertex3f( 1500.0f, -8.0f, -1500.0f);
glEnd();
This code is for the lighting.
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_LIGHT1);
glEnable(GL_LIGHT2);
glEnable(GL_AMBIENT);
glEnable(GL_DIFFUSE);
glEnable(GL_SPECULAR);
glDisable(GL_EMISSION);
GLfloat lightpos[] = {1.0, 1.0, 7.0, 1.0};
glLightfv(GL_LIGHT0, GL_POSITION, lightpos);
GLfloat ambientLight[] = { 0.5f, 0.5f, 0.5f, 1.0f };
GLfloat diffuseLight[] = { 0.8f, 0.8f, 0.8, 1.0f };
GLfloat specularLight[] = { 0.5f, 0.5f, 0.5f, 1.0f };
GLfloat position[] = { 1250.0f, 1250.0f, 2.0f, 1.0f };
GLfloat position2[] = { -1250.0, 1250.0f, 2.0f, 1.0f};
GLfloat position3[] = { 2.0f, 1250.0f, 1250.0f, 1.0f};
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight);
glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight);
glLightfv(GL_LIGHT0, GL_POSITION, position);
glLightfv(GL_LIGHT1, GL_POSITION, position2);
glLightfv(GL_LIGHT1, GL_AMBIENT, ambientLight);
glLightfv(GL_LIGHT1, GL_DIFFUSE, diffuseLight);
glLightfv(GL_LIGHT1, GL_SPECULAR, specularLight);
//glLightfv(GL_LIGHT2, GL_POSITION, position3);
//glLightfv(GL_LIGHT2, GL_AMBIENT, ambientLight);
//glLightfv(GL_LIGHT2, GL_DIFFUSE, diffuseLight);
//glLightfv(GL_LIGHT2, GL_SPECULAR, specularLight);
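For reference, two things in these snippets are worth checking: GL_AMBIENT, GL_DIFFUSE, GL_SPECULAR and GL_EMISSION are not valid glEnable/glDisable capabilities (they are light/material parameter names, so those calls only raise GL_INVALID_ENUM), and the ground quad's normal should be a unit vector pointing up rather than (1, 1, 1). A minimal sketch of the ground draw under those assumptions:
glEnable(GL_DEPTH_TEST);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_LIGHT1);
glEnable(GL_NORMALIZE);               // keep normals unit length for the lighting math

// Ground lies in the XZ plane at y = -8, so its normal points straight up (+Y)
glColor3f(0.039f, 0.341f, 0.078f);
glNormal3f(0.0f, 1.0f, 0.0f);
glBegin(GL_QUADS);
glVertex3f(-1500.0f, -8.0f, -1500.0f);
glVertex3f(-1500.0f, -8.0f,  1500.0f);
glVertex3f( 1500.0f, -8.0f,  1500.0f);
glVertex3f( 1500.0f, -8.0f, -1500.0f);
glEnd();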
I render a triangle strip this way, and with basic pass-through shaders everything works fine:
EDIT:
I added TexCoords and modified the shaders, but I keep getting the same result: my 3D objects are going black!
UPDATED CODE:
// Draw all the prisms
glBegin(GL_TRIANGLE_STRIP);
for(i=0;i<num_elems;i++) {
    for(j=0;j<num_vertices;j++) {
        glNormal3fv((GLfloat *)(a+j*2));
        glTexCoord2f(0.0f, 0.0f);
        glVertex3fv((GLfloat *)(a+j*2+1));
        glTexCoord2f(1.0f, 0.0f);
        glNormal3fv((GLfloat *)(b+j*2));
        glTexCoord2f(1.0f, 1.0f);
        glVertex3fv((GLfloat *)(b+j*2+1));
    }
    glNormal3fv((GLfloat *)(a));
    glTexCoord2f(0.0f, 1.0f);
    glVertex3fv((GLfloat *)(a+1));
    glNormal3fv((GLfloat *)(b));
    glTexCoord2f(0.0f, 0.0f);
    glVertex3fv((GLfloat *)(b+1));
    a+=face_size;
    b+=face_size;
}
glEnd();
And I am trying to attach a texture to my shaders, but I can't figure out how to pass the texture.
I create and add the texture to my program this way. The texture data is verified and stored in an array declared as unsigned char data[imageSize]:
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glActiveTexture(GL_TEXTURE0); // Texture unit 0
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0,GL_BGR, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
This is what I read in other posts with the same issue, and I added it to my code after compiling my shaders and generating my program without errors.
Tutorials tend to omit this information (how you tell your shader the name and location of your bound texture).
GLuint t1Location = glGetUniformLocation(programID, "tex1");
glUniform1i(t1Location, 0);
And my shaders (UPDATED CODE):
#define GLSL(version, shader) "#version " #version "\n" #shader
const char* vert = GLSL
(
110,
varying vec4 position;
varying vec3 normal;
varying out vec4 texCoord;
varying vec2 coord;
void main()
{
    position = gl_ModelViewMatrix * gl_Vertex;
    normal = normalize( gl_NormalMatrix * gl_Normal.xyz );
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    coord = vec2(gl_MultiTexCoord0);
}
);
const char* frag = GLSL
(
110,
uniform sampler2D tex1;
varying vec4 position;
varying vec3 normal;
varying vec2 coord;
void main()
{
    gl_FragColor = texture2D(tex1, coord);
}
);
EDIT2:
I am setting up GL this way (maybe something is conflicting with my texture shader, but I don't think so!):
/* set up depth-buffering */
glEnable(GL_DEPTH_TEST);
glEnable(GL_POLYGON_SMOOTH);
glHint(GL_POLYGON_SMOOTH_HINT, GL_FASTEST);
/* set up lights */
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);
glShadeModel(GL_SMOOTH);
GLfloat lightpos[] = { 3.0, 0.0, 1.0, 0.0 };
GLfloat lightcolor[] = { 0.5, 0.5, 0.5, 1.0 };
GLfloat ambcolor[] = { 0.5, 0.5, 0.5, 1.0 };
glLightModelfv(GL_LIGHT_MODEL_AMBIENT,ambcolor);
glEnable(GL_LIGHTING);
glColorMaterial(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE);
glEnable (GL_COLOR_MATERIAL);
glLightfv (GL_LIGHT0,GL_POSITION,lightpos);
glLightfv (GL_LIGHT0,GL_AMBIENT,ambcolor);
glLightfv (GL_LIGHT0,GL_DIFFUSE,lightcolor);
glLightfv (GL_LIGHT0,GL_SPECULAR,lightcolor);
glLightf (GL_LIGHT0,GL_CONSTANT_ATTENUATION,0.2);
glLightf (GL_LIGHT0,GL_LINEAR_ATTENUATION,0.0);
glLightf (GL_LIGHT0,GL_QUADRATIC_ATTENUATION,1.0);
glEnable (GL_LIGHT0);
glEnable(GL_TEXTURE_2D);
Replacing gl_FragColor with a flat color works fine.
I suspect it is related to the coord parameter, but I have tried all the stuff I found and nothing works for me.
The internal texture format GL_BGR is not valid. GL_BGR is valid for the format of the source data, but the internal representation has to be GL_RGB.
See glTexImage2D.
Adapt your code like this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
See the Khronos reference page GLAPI/glTexImage2D which says:
To define texture images, call glTexImage2D. The arguments describe the parameters of the texture image, such as height, width, width of the border, level-of-detail number (see glTexParameter), and number of color components provided. The last three arguments describe how the image is represented in memory.
format determines the composition of each element in data. It can assume one of these symbolic values:
GL_BGR:
Each element is an RGB triple. The GL converts it to floating point and assembles it into an RGBA element by attaching 1 for alpha. Each component is clamped to the range [0,1].
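Putting it together, a corrected upload and sampler setup might look like this (a sketch; note that glActiveTexture selects the texture unit before glBindTexture, so the texture really ends up bound on unit 0, and the program must be in use when glUniform1i is called):
GLuint textureID;
glGenTextures(1, &textureID);
glActiveTexture(GL_TEXTURE0);                 // select texture unit 0 first...
glBindTexture(GL_TEXTURE_2D, textureID);      // ...then bind the texture to it
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// internal format GL_RGB, source layout GL_BGR
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

// after linking programID:
glUseProgram(programID);
GLint t1Location = glGetUniformLocation(programID, "tex1");
glUniform1i(t1Location, 0);                   // sampler "tex1" reads from unit 0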
I'm working on a graphics model of the Moon rotating around the Earth. Right now, the Moon spins on its y axis while rotating around the Earth. How can I prevent the Moon from spinning but still allow it to orbit? Here's the code:
Edit:
Added an animation video to demonstrate problem:
http://www.youtube.com/watch?v=ltGV4pXD5Cs
void DrawInhabitants(GLint nShadow)
{
    static GLfloat yRot = 0.0f;     // Rotation angle for animation
    if(nShadow == 0)
    {
        yRot += 0.2f;
    }

    // Draw the randomly located spheres
    glBindTexture(GL_TEXTURE_2D, textureObjects[MOON_TEXTURE]);
    glPushMatrix();
    glTranslatef(0.0f, 0.1f, -2.5f);

    glPushMatrix();
    glRotatef(-yRot * 2.0f, 0.0f, 1.0f, 0.0f);
    glTranslatef(1.0f, 0.0f, 0.0f);
    gltDrawSphere(0.1f, 21, 11);
    glPopMatrix();

    if(nShadow == 0)
    {
        // Torus alone will be specular
        glMaterialfv(GL_FRONT, GL_SPECULAR, fBrightLight);
    }

    glRotatef(-yRot, 0.0f, 1.0f, 0.0f);
    glBindTexture(GL_TEXTURE_2D, textureObjects[EARTH_TEXTURE]);
    gltDrawSphere(0.3f, 21, 11);
    glMaterialfv(GL_FRONT, GL_SPECULAR, fNoLight);
    glPopMatrix();
}
The problem is that you're rotating the coordinate system in order to place the moon at its desired relative position. This rotation is global, so it affects the orientation of the moon as well. You need to undo the rotation after translating, so you have a "translation sandwich":
rotate a
translate
rotate -a
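Applied to the moon block from the code above, the sandwich might look roughly like this (a sketch):
glPushMatrix();
glRotatef(-yRot * 2.0f, 0.0f, 1.0f, 0.0f);   // rotate a: move around the orbit
glTranslatef(1.0f, 0.0f, 0.0f);              // translate out to the orbit radius
glRotatef(yRot * 2.0f, 0.0f, 1.0f, 0.0f);    // rotate -a: undo the spin so the moon keeps its facing
gltDrawSphere(0.1f, 21, 11);
glPopMatrix();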
I have to read a 3D object from an ASE file. This object turns out to be too big for the world I have to create; therefore, I must scale it down.
With its original size, it is properly lighted up.
However, once I scale it down, it becomes oversaturated.
The world is centered around (0, 0, 0) and it is 100 meters long (y axis) and 50 meters wide (x axis), my upVector is (0, 0, 1). There are two lights, light0 in (20, 35, 750) and light1 in (-20, -35, 750).
Relevant parts of the code:
void init(void){
    glClearColor(0.827, 0.925, 0.949, 0.0);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_COLOR_MATERIAL);
    glColorMaterial(GL_FRONT, GL_DIFFUSE);
    glEnable(GL_LIGHT0);
    glEnable(GL_LIGHT1);
    glEnable(GL_LIGHTING);
    glShadeModel(GL_SMOOTH);

    GLfloat difusa[] = { 1.0f, 1.0f, 1.0f, 1.0f};   // white light
    glLightfv(GL_LIGHT0, GL_DIFFUSE, difusa);
    glLightfv(GL_LIGHT1, GL_DIFFUSE, difusa);

    loadObjectFromFile("objeto.ASE");
}
void display ( void ) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(eyeX, eyeY, eyeZ, atX, atY, atZ, 0.0, 0.0, 1.0);

    GLfloat posicion0[] = { 20.0f, 35.0f, 750.0f, 1.0f};
    glLightfv(GL_LIGHT0, GL_POSITION, posicion0);
    GLfloat posicion1[] = { -20.0f, -35.0f, 750.0f, 1.0f};
    glLightfv(GL_LIGHT1, GL_POSITION, posicion1);

    glColor3f(0.749, 0.918, 0.278);
    glPushMatrix();
    glTranslatef(0.0, 0.0, 1.5);
    //Here comes the problem
    glScalef(0.08, 0.08, 0.08);
    glBegin(GL_TRIANGLES);
    for(int i = 0; i < numFaces; i++){
        glNormal3d(faces3D[i].n.nx, faces3D[i].n.ny, faces3D[i].n.nz);
        glVertex3d(vertex[faces3D[i].s.A].x, vertex[faces3D[i].s.A].y, vertex[faces3D[i].s.A].z);
        glVertex3d(vertex[faces3D[i].s.B].x, vertex[faces3D[i].s.B].y, vertex[faces3D[i].s.B].z);
        glVertex3d(vertex[faces3D[i].s.C].x, vertex[faces3D[i].s.C].y, vertex[faces3D[i].s.C].z);
    }
    glEnd();
    glPopMatrix();
    glutSwapBuffers();
}
Why does lighting fail when the object is scaled down?
The problem you're running into is that scaling the modelview matrix also influences the "normal matrix" that normals are transformed with. The "normal matrix" is actually the transpose of the inverse of the modelview matrix. So by scaling down the modelview matrix, you're scaling up the normal matrix (because of the inversion step used to obtain it).
Because of that, the transformed normals must be rescaled or normalized if the scale of the modelview matrix is not unitary. In fixed-function OpenGL there are two methods to do this: normal normalization (sounds funny, I know) and normal rescaling. You can enable either with
glEnable(GL_NORMALIZE);
glEnable(GL_RESCALE_NORMALS);
In a shader you'd simply normalize the transformed normal
#version ...
uniform mat3 mat_normal;
in vec3 vertex_normal;
void main()
{
    ...
    vec3 view_normal = normalize( mat_normal * vertex_normal );
    ...
}
Depending on the settings of GL_NORMALIZE and GL_RESCALE_NORMALS, your normals can be transformed by the OpenGL pipeline.
Start with glEnable(GL_NORMALIZE) and see if that solves your problem.
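In the code from the question, it would be enough to add this once in init() (a sketch):
glEnable(GL_NORMALIZE);          // full renormalization of normals, always safe
// or, because glScalef(0.08, 0.08, 0.08) is a uniform scale, the cheaper variant:
// glEnable(GL_RESCALE_NORMALS);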
I have this code to draw an arrow:
const GLfloat vertices[] = {
-0.25f, -0.25f,
0.0f, 0.0f,
0.25f, -0.25f,
0.0f, 0.5f,
};
glVertexPointer(2, GL_FLOAT, 0, vertices);
glEnableClientState(GL_VERTEX_ARRAY);
glColor4f(0.0f, 0.5f, 0.0f, 1.0f);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
It should draw similar to this:
This is the actual result (which is undesired):
I don't see what I have done wrong; the vertices seem correct to me, but it seems like OpenGL draws the polygon in a different order than I specified. Can anyone help me out? Thanks in advance. :)
Your triangle strip takes the lower three points first (i.e. the lower part of your green arrow) and then the right three points. Just change the order of points in your definition:
const GLfloat vertices[] = {
-0.25f, -0.25f,
0.0f, 0.0f,
0.0f, 0.5f,
0.25f, -0.25f,
};
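For reference, GL_TRIANGLE_STRIP emits one triangle per vertex after the first two, so this array produces (v0, v1, v2) for the left half of the arrow and (v1, v2, v3) for the right half; the draw call itself is unchanged:
glVertexPointer(2, GL_FLOAT, 0, vertices);
glEnableClientState(GL_VERTEX_ARRAY);
glColor4f(0.0f, 0.5f, 0.0f, 1.0f);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);   // triangles (v0,v1,v2) and (v1,v2,v3)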
I've been learning OpenGL, and I decided to code a function to draw a unit cube centered at 0,0,0 so I could then transform it as I wished. It is made of 6 faces.
However, I can only transform one of the faces on my cube :(
Here's the code:
void myUnitCube() {
    glPushMatrix();
    glNormal3f(0.0, 0.0, 1.0);
    glTranslated(0.0, 0.0, -0.5);
    glRotated(180, 0.0, 1.0, 0.0);
    glRectf(-0.5, -0.5, 0.5, 0.5);
    glPopMatrix();

    glPushMatrix();
    glNormal3f(0.0, 0.0, 1.0);
    glTranslated(0.0, 0.0, 0.5);
    glRectf(-0.5, -0.5, 0.5, 0.5);
    glPopMatrix();

    glPushMatrix();
    glNormal3f(0.0, 0.0, 1.0);
    glTranslated(0.5, 0.0, 0.0);
    glRotated(90, 0.0, 1.0, 0.0);
    glRectf(-0.5, -0.5, 0.5, 0.5);
    glPopMatrix();

    glPushMatrix();
    glNormal3f(0.0, 1.0, 0.0);
    glTranslated(-0.5, 0.0, 0.0);
    glRotated(-90, 0.0, 1.0, 0.0);
    glRectf(-0.5, -0.5, 0.5, 0.5);
    glPopMatrix();

    glPushMatrix();
    glNormal3f(0.0, 0.0, 0.0);
    glTranslated(0.0, -0.5, 0.0);
    glRotated(90, 1.0, 0.0, 0.0);
    glRectf(-0.5, -0.5, 0.5, 0.5);
    glPopMatrix();

    glPushMatrix();
    glNormal3f(0.0, 0.0, 0.0);
    glTranslated(0.0, 0.5, 0.0);
    glRotated(-90, 1.0, 0.0, 0.0);
    glRectf(-0.5, -0.5, 0.5, 0.5);
    glPopMatrix();
}
If I call myUnitCube() after:
glPushMatrix();
glTranslated(-4,0,-3);
glPushMatrix();
glScaled(8,0.1,6);
The result is that only the first face to be drawn gets scaled. How do I work around this?
I understand this situation arises because of the pops but I need them...
Thanks!
Are your matrix pushes and pops matched? Based on the code you gave, they are not. This may be the cause of your problem.
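A balanced version of the calling code might look like this (a sketch; each push gets a matching pop after the cube is drawn):
glPushMatrix();
glTranslated(-4, 0, -3);
glPushMatrix();
glScaled(8, 0.1, 6);
myUnitCube();     // every face now sees the translate and the scale
glPopMatrix();    // undo the scale
glPopMatrix();    // undo the translate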