How to correctly make a depth cubemap for shadow mapping?

I have written code to render my scene objects to a cubemap texture of format GL_DEPTH_COMPONENT and then use this texture in a shader to determine whether a fragment is directly lit or not, for shadowing purposes. However, my cubemap comes out all black. I suspect I am not setting up my FBO or render state correctly, but I fail to see what is missing.
Using GL 3.3 in compatibility profile.
This is my code for creating the FBO and cubemap texture:
glGenFramebuffers(1, &fboShadow);
glGenTextures(1, &texShadow);
glBindTexture(GL_TEXTURE_CUBE_MAP, texShadow);
for (int sideId = 0; sideId < 6; sideId++) {
// Allocate storage for each face now; the data is filled in later by rendering.
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId, 0, GL_DEPTH_COMPONENT, 512, 512, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
}
// Don't interpolate depth value sampling. Between occluder and occludee there will
// be an instant jump in depth value, not a linear transition.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
My full rendering function then looks like so:
void render() {
// --- MAKE DEPTH CUBEMAP ---
// Set shader program for depth testing
glUseProgram(progShadow);
// Get the light for which we want to generate a depth cubemap
PointLight p = pointLights.at(0);
// Bind our framebuffer for drawing; clean it up
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fboShadow);
glClear(GL_DEPTH_BUFFER_BIT);
// Make 1:1-ratio, 90-degree view frustum for a 512x512 texture.
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(90.0, 1, 16.0, 16384.0);
glViewport(0, 0, 512, 512);
glMatrixMode(GL_MODELVIEW);
// Set modelview and projection matrix uniforms
setShadowUniforms();
// Need 6 renderpasses to complete each side of the cubemap
for (int sideId = 0; sideId < 6; sideId++) {
// Attach depth attachment of current framebuffer to level 0 of currently relevant target of texShadow cubemap texture.
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId, texShadow, 0);
// This completeness check passes; no error is ever printed.
GLenum status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
std::cout << "Shadow FBO is broken with code " << status << std::endl;
}
// Push modelview matrix stack because we need to rotate and move camera every time
glPushMatrix();
// This does a switch-case with glRotatefs
rotateCameraForSide(GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId);
// Render from light's position.
glTranslatef(-p.getX(), -p.getY(), -p.getZ());
// Render all objects.
for (ObjectList::iterator it = objectList.begin(); it != objectList.end(); it++) {
(*it)->render();
}
glPopMatrix();
}
// --- RENDER SCENE ---
// Bind default framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// Setup proper projection matrix with 70 degree vertical FOV and ratio according to window frame dimensions.
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(70.0, ((float)vpWidth) / ((float)vpHeight), 16.0, 16384.0);
glViewport(0, 0, vpWidth, vpHeight);
glUseProgram(prog);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
applyCameraPerspective();
// My PointLight class has both a position (world space) and renderPosition (camera space) Vec3f variable;
// The lights' renderPositions get transformed with the modelview matrix by this.
updateLights();
// And here, among other things, the lights' camera space coordinates go to the shader.
setUniforms();
// Render all objects
for (ObjectList::iterator it = objectList.begin(); it != objectList.end(); it++) {
// Object texture goes to texture unit 0
GLuint usedTexture = glTextureList.find((*it)->getTextureName())->second;
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, usedTexture);
glUniform1i(textureLoc, 0);
// Cubemap goes to texture unit 1
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_CUBE_MAP, texShadow);
glUniform1i(shadowLoc, 1);
(*it)->render();
}
glPopMatrix();
frameCount++;
}
The shader program for rendering depth values ("progShadow") is simple.
Vertex shader:
#version 330
in vec3 position;
uniform mat4 modelViewMatrix, projectionMatrix;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1);
}
Fragment shader:
#version 330
void main() {
// OpenGL sets the depth anyway. Nothing to do here.
}
The shader program for final rendering ("prog") has a fragment shader which looks something like this:
#version 330
#define MAX_LIGHTS 8
in vec3 fragPosition;
in vec3 fragNormal;
in vec2 fragTexCoordinates;
out vec4 fragColor;
uniform sampler2D colorTexture;
uniform samplerCubeShadow shadowCube;
uniform uint activeLightCount;
struct Light {
vec3 position;
vec3 diffuse;
float cAtt;
float lAtt;
float qAtt;
};
// Index 0 to (activeLightCount - 1) need to be the active lights.
uniform Light lights[MAX_LIGHTS];
void main() {
vec3 lightColor = vec3(0, 0, 0);
vec3 normalFragmentToLight[MAX_LIGHTS];
float distFragmentToLight[MAX_LIGHTS];
float distEyeToFragment = length(fragPosition);
// Accumulate all light in "lightColor" variable
for (uint i = uint(0); i < activeLightCount; i++) {
normalFragmentToLight[i] = normalize(lights[i].position - fragPosition);
distFragmentToLight[i] = distance(fragPosition, lights[i].position);
float attenuation = (lights[i].cAtt
+ lights[i].lAtt * distFragmentToLight[i]
+ lights[i].qAtt * pow(distFragmentToLight[i], 2.0));
float dotProduct = dot(fragNormal, normalFragmentToLight[i]);
lightColor += lights[i].diffuse * max(dotProduct, 0.0) / attenuation;
}
// Shadow mapping only for light at index 0 for now.
float distOccluderToLight = texture(shadowCube, vec4(normalFragmentToLight[0], 1));
// My geometries use inches as units, hence a large bias of 1
bool isLit = (distOccluderToLight + 1) < distFragmentToLight[0];
fragColor = texture2D(colorTexture, fragTexCoordinates) * vec4(lightColor, 1.0f) * int(isLit);
}
I have verified that all uniform location variables are set to a proper value (i.e. not -1).
It might be worth noting that I make no call to glBindFragDataLocation() for "progShadow" prior to linking it, because that shader should not write any color value.
See anything obviously wrong here?

For shadow maps, the depth buffer's internal format is pretty important (too small and things look awful; too large and you eat memory bandwidth). You should use a sized format (e.g. GL_DEPTH_COMPONENT24) to guarantee a certain size; otherwise the implementation will pick whatever it wants. As for debugging a cubemap shadow map, the easiest thing to do is actually to draw the scene into each cube face and output color instead of depth. Then, where you currently try to use the cubemap to sample depth, write the sampled color to fragColor instead. You can rule out view issues immediately this way.
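For instance, the allocation loop from the question would only need a one-line change to request a sized format (a sketch, keeping the question's 512x512 faces):

for (int sideId = 0; sideId < 6; sideId++) {
    // A sized internal format guarantees 24 bits of depth instead of
    // leaving the choice to the implementation.
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId, 0, GL_DEPTH_COMPONENT24,
                 512, 512, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
}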
There is another, much more serious issue, however. You are using samplerCubeShadow, but you have not set GL_TEXTURE_COMPARE_MODE for your cube map. Attempting to sample from a depth texture with this sampler type and without GL_TEXTURE_COMPARE_MODE = GL_COMPARE_REF_TO_TEXTURE will produce undefined results. And even if you did have this mode set properly, the 4th component of the texture coordinates is used as the depth comparison reference -- a constant value of 1.0 is NOT what you want.
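Setting that up on the question's texture would look something like this (GL_LEQUAL is the usual comparison function for shadow maps):

glBindTexture(GL_TEXTURE_CUBE_MAP, texShadow);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);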
Likewise, the depth buffer does not store linear distance, so you cannot directly compare against the distance you computed here:
distFragmentToLight[i] = distance(fragPosition, lights[i].position);
Instead, something like this will be necessary:
float VectorToDepth (vec3 Vec)
{
    vec3 AbsVec = abs(Vec);
    float LocalZcomp = max(AbsVec.x, max(AbsVec.y, AbsVec.z));

    // Replace f and n with the far and near plane values you used when
    // you drew your cube map.
    const float f = 2048.0;
    const float n = 1.0;

    float NormZComp = (f + n) / (f - n) - (2.0 * f * n) / (f - n) / LocalZcomp;
    return (NormZComp + 1.0) * 0.5;
}
float LightDepth = VectorToDepth(fragPosition - lights[i].position);
float depth_compare = texture(shadowCube, vec4(normalFragmentToLight[0], LightDepth));
* Code for VectorToDepth borrowed from "Omnidirectional shadow mapping with depth cubemap"
Now depth_compare will be a value between 0.0 (completely in shadow) and 1.0 (completely out of shadow). If you have linear texture filtering enabled, the hardware will sample the depth at 4 points and may give you a form of 2x2 PCF filtering. If you have nearest texture filtering, then it will either be 1.0 or 0.0.
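If you want that 2x2 PCF behavior, switch the cube map's filters to linear in the setup code; with GL_TEXTURE_COMPARE_MODE enabled, the hardware filters the comparison results rather than the raw depth values. A sketch, reusing the question's texShadow:

glBindTexture(GL_TEXTURE_CUBE_MAP, texShadow);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);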

Related

Using gluLookAt() correctly?

I am trying to set the angle of view with gluLookAt().
Here is my code, where I tried to set the camera without results.
Here is the function displayCone():
void displayCone(void)
{
glMatrixMode(GL_MODELVIEW);
// clear the drawing buffer.
glClear(GL_COLOR_BUFFER_BIT);
// reset the current matrix to the identity.
glLoadIdentity();
// translate the drawing by z = -4.5
// Note: when you decrease z, e.g. to -8.0, the drawing looks farther away, i.e. smaller.
glTranslatef(0.0,0.0,-4.5);
// Red color used to draw.
glColor3f(0.8, 0.2, 0.1);
// changing in transformation matrix.
// rotation about X axis
glRotatef(xRotated,1.0,0.0,0.0);
// rotation about Y axis
glRotatef(yRotated,0.0,1.0,0.0);
// rotation about Z axis
glRotatef(zRotated,0.0,0.0,1.0);
// scaling transformation
glScalef(1.0,1.0,1.0);
// built-in (glut library) function that draws a cone
// move the peak of the cone to the origin
glTranslatef(0.0, 0.0, -height);
glutSolidCone(base,height,slices,stacks);
// Flush buffers to screen
gluLookAt(3,3,3,0,0,-4.5,0,1,0);
glFlush();
// swap buffers would be called here if we were using double buffering
// glutSwapBuffers();
}
With my main:
int main (int argc, char **argv)
{
glutInit(&argc, argv);
//double buffering used to avoid flickering problem in animation
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
// window size
glutInitWindowSize(400,350);
// create the window
glutCreateWindow("Cone Rotating Animation");
glPolygonMode(GL_FRONT_AND_BACK,GL_LINE);
glClearColor(0.0,0.0,0.0,0.0);
//Assign the function used in events
glutDisplayFunc(displayCone);
glutReshapeFunc(reshapeCone);
glutIdleFunc(idleCone);
//Let start glut loop
glutMainLoop();
return 0;
}
The function idleCone changes the values of xRotated, yRotated, ... and displays the cone. Any ideas?
I am pretty sure I didn't understand the right moment to use gluLookAt()...
gluLookAt changes the current matrix, similar to glTranslatef or glRotatef.
The operation defines a transformation matrix and multiplies the current matrix by the new transformation matrix.
gluLookAt has to be called before glutSolidCone, e.g.:
void displayCone(void)
{
// set matrix mode
glMatrixMode(GL_MODELVIEW);
// clear model view matrix
glLoadIdentity();
// multiply view matrix to current matrix
gluLookAt(3,3,3,0,0,-4.5,0,1,0); // <----------------------- add
// clear the drawing buffer.
glClear(GL_COLOR_BUFFER_BIT);
// translate the drawing by z = -4.5
// Note: when you decrease z, e.g. to -8.0, the drawing looks farther away, i.e. smaller.
glTranslatef(0.0,0.0,-4.5);
// Red color used to draw.
glColor3f(0.8, 0.2, 0.1);
// changing in transformation matrix.
// rotation about X axis
glRotatef(xRotated,1.0,0.0,0.0);
// rotation about Y axis
glRotatef(yRotated,0.0,1.0,0.0);
// rotation about Z axis
glRotatef(zRotated,0.0,0.0,1.0);
// scaling transformation
glScalef(1.0,1.0,1.0);
// built-in (glut library) function that draws a cone
// move the peak of the cone to the origin
glTranslatef(0.0, 0.0, -height);
glutSolidCone(base,height,slices,stacks);
// Flush buffers to screen
// gluLookAt(3,3,3,0,0,-4.5,0,1,0); <----------------------- delete
glFlush();
// swap buffers would be called here if we were using double buffering
// glutSwapBuffers();
}
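The reason the order matters: OpenGL post-multiplies the current matrix, so the transformation issued first is applied to the vertices last. A schematic sketch of the chain the fixed code builds (the comments are illustrative, not part of the fix):

glLoadIdentity();
gluLookAt(3, 3, 3, 0, 0, -4.5, 0, 1, 0); // view transform: applied to vertices last
glTranslatef(0.0, 0.0, -4.5);            // model translation
glRotatef(xRotated, 1.0, 0.0, 0.0);      // model rotations: applied to vertices first
// Effectively: v_eye = LookAt * Translate * Rotate * v_object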

Texturing a sphere in OpenGL with glTexGen

I want to get an earth texture on a sphere. My sphere is an icosphere built from many triangles (100+), and I found it confusing to set the UV coordinates for the whole sphere. I tried to use glTexGen and the result is quite close, but my texture is repeated 8 times (see image). I cannot find a way to make it wrap the whole object just once. Here is the code where the sphere and textures are created.
glEnable(GL_TEXTURE_2D);
glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
glBegin(GL_TRIANGLES);
for (int i = 0; i < new_sphere->NumOfTrians; i++)
{
Triangle *draw_Trian = new_sphere->Trians+i;
glVertex3f(draw_Trian->pnts[0].coords[0], draw_Trian->pnts[0].coords[1], draw_Trian->pnts[0].coords[2]);
glVertex3f(draw_Trian->pnts[1].coords[0], draw_Trian->pnts[1].coords[1], draw_Trian->pnts[1].coords[2]);
glVertex3f(draw_Trian->pnts[2].coords[0], draw_Trian->pnts[2].coords[1], draw_Trian->pnts[2].coords[2]);
}
glDisable(GL_TEXTURE_2D);
free(new_sphere->Trians);
free(new_sphere);
glEnd();
You need to define how your texture is supposed to map to your triangles. This depends on the texture you're using. There are a multitude of ways to map the surface of a sphere with a texture (since no one mapping is free of singularities). It looks like you have a cylindrical projection texture there. So we will emit cylindrical UV coordinates.
I've tried to give you some code here, but it assumes that:
Your mesh is a unit sphere (i.e., centered at 0 and has radius 1)
pnts.coords is an array of floats
You want to use the second coordinate (coord[1]) as the 'up' direction (or the height in a cylindrical mapping)
Your code would look something like this. I've defined a new function for emitting cylindrical UVs, so you can put that wherever you like.
/* Map [(-1, -1, -1), (1, 1, 1)] into [(0, 0), (1, 1)] cylindrically */
inline void uvCylinder(float* coord) {
float angle = 0.5f * atan2(coord[2], coord[0]) / 3.14159f + 0.5f;
float height = 0.5f * coord[1] + 0.5f;
glTexCoord2f(angle, height);
}
glEnable(GL_TEXTURE_2D);
glBegin(GL_TRIANGLES);
for (int i = 0; i < new_sphere->NumOfTrians; i++) {
Triangle *t = new_sphere->Trians+i;
uvCylinder(t->pnts[0].coords);
glVertex3f(t->pnts[0].coords[0], t->pnts[0].coords[1], t->pnts[0].coords[2]);
uvCylinder(t->pnts[1].coords);
glVertex3f(t->pnts[1].coords[0], t->pnts[1].coords[1], t->pnts[1].coords[2]);
uvCylinder(t->pnts[2].coords);
glVertex3f(t->pnts[2].coords[0], t->pnts[2].coords[1], t->pnts[2].coords[2]);
}
glEnd();
glDisable(GL_TEXTURE_2D);
free(new_sphere->Trians);
free(new_sphere);
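One caveat worth flagging as an addition to the above: triangles that straddle the atan2 wrap line, where u jumps from 1 back to 0, will have the whole texture smeared across them. A common workaround, sketched here with a hypothetical helper, is to compute the three u values per triangle and shift the wrapped ones before emitting the coordinates (this requires GL_REPEAT wrapping on the texture):

/* Hypothetical helper (needs <math.h>): if a triangle's u range spans the
   seam, push the small u values up by 1 so interpolation stays continuous. */
inline void fixSeam(float u[3]) {
    float umin = fminf(u[0], fminf(u[1], u[2]));
    float umax = fmaxf(u[0], fmaxf(u[1], u[2]));
    if (umax - umin > 0.5f) {
        for (int k = 0; k < 3; k++)
            if (u[k] < 0.5f) u[k] += 1.0f;
    }
}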
Note on Projections
The reason it's confusing to build UV coordinates for the whole sphere is that there isn't one 'correct' way to do it. Mathematically speaking, there's no such thing as a perfect 2D mapping of a sphere; hence we have so many different types of projections. When you have a 2D image that's a texture for a spherical object, you need to know what type of projection that image was built for, so that you can emit the correct UV coordinates for that texture.

In OpenGL ES 2.0 depth texture buffer is not working when using FBO with texture depth buffer

Currently I'm working on a project that renders applications to a Framebuffer Object (FBO) first, then renders them back using the FBO's color and depth texture attachments, in OpenGL ES 2.0.
Multiple applications render fine with the color buffers, but when I try to use the depth information from the depth texture attachment, it does not seem to work.
I tried to visualize the depth texture by sampling it with texture coordinates, and it is all white. People say the grayscale values may differ only very slightly, i.e. even the darker parts can be close to 1.0, so I modified my fragment shader as follows:
vec4 depth;
depth = texture2D(s_depth0, v_texCoord);
if (depth.r == 1.0)
    gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
Unsurprisingly, it's all red.
The application code:
void Draw ( ESContext *esContext )
{
UserData *userData = esContext->userData;
// Clear the color buffer
glClear ( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Draw a triangle
GLfloat vVertices[] = { 0.0f, 0.5f, 0.5f,
-0.5f, -0.5f,-0.5f,
0.5f, -0.5f,-0.5f };
// Set the viewport
glViewport ( 0, 0, esContext->width, esContext->height );
// Use the program object
glUseProgram ( userData->programObject );
// Load the vertex position
glVertexAttribPointer ( 0, 3, GL_FLOAT, GL_FALSE, 0, vVertices );
glEnableVertexAttribArray ( 0 );
glDrawArrays ( GL_TRIANGLES, 0, 3 );
eglSwapBuffers ( esContext->eglDisplay, esContext->eglSurface );
}
So, what could the problem be if the color buffer works fine while the depth buffer doesn't?
I finally solved it. The cause turned out to be that the client applications never enabled GL_DEPTH_TEST.
So if you hit the same problem, be sure to enable GL_DEPTH_TEST by calling glEnable(GL_DEPTH_TEST); during OpenGL ES initialization. It is disabled by default, presumably for the sake of performance.
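For reference, a minimal sketch of where such a call would go (the Init function and its contents are assumptions, not the poster's code):

void Init(ESContext *esContext)
{
    // Depth testing is disabled by default in OpenGL ES; enable it once at startup.
    glEnable(GL_DEPTH_TEST);
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
}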
Thanks for all the advice and answers.
The depth texture needs to be linearized to be visible in the viewport, because depth is stored non-linearly. Try this in the fragment shader:
uniform float farClip, nearClip;
float depth = texture2D(s_depth0, v_texCoord).x;
float depthLinear = (2.0 * nearClip) / (farClip + nearClip - depth * (farClip - nearClip));
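For context, a complete ES 2.0 fragment shader built around that formula might look like this (the uniform and varying names are assumptions carried over from the question):

precision mediump float;
uniform sampler2D s_depth0;
uniform float nearClip, farClip;
varying vec2 v_texCoord;
void main() {
    float depth = texture2D(s_depth0, v_texCoord).x;
    // Invert the hyperbolic depth mapping to get a linear 0..1 value.
    float depthLinear = (2.0 * nearClip) / (farClip + nearClip - depth * (farClip - nearClip));
    gl_FragColor = vec4(vec3(depthLinear), 1.0);
}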

opengl failing to draw mesh

SOLVED: I'm not really sure how though... thanks for all your help guys.
I tried glDisable(GL_CULL_FACE); but the mesh is still not visible.
Basically I'm trying to draw a mesh (made from verts, normals, and texture coords) in OpenGL, using a display list. The mesh is in .obj format (exported from 3ds Max 2013).
The problem is that the mesh is not visible.
To draw the display list I'm just using glCallLists (list, 1);
I have verified that I can draw things to the screen by drawing a point in the center of the screen and that works fine.
Could it be possible that the camera is positioned inside the mesh? If so, is there an OpenGL state that I could enable to allow me to see the inside of a set of verts?
I know the data I have is valid; I verified it by printing each vert, normal, and texture coord to a file before adding it to the display list.
I have done no glTranslatef or anything like that; my projection matrix is set up like this:
glMatrixMode (GL_PROJECTION);
glLoadIdentity ();
gluPerspective (45.0, (float)1024/(float)768, -9999, 9999);
glMatrixMode (GL_MODELVIEW);
glLoadIdentity ();
If you want to have a look at the .obj file, here it is: http://pastebin.com/PpG3vG5e
This is how I create the display list:
list = glGenLists (1);
glNewList (list, GL_COMPILE);
glBegin (GL_TRIANGLES);
for (i = 0; i < data.face_count; i++)
{
// first vert
normal[0][0] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[0]]->e[0];
normal[0][1] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[0]]->e[1];
normal[0][2] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[0]]->e[2];
tex[0][0] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[0]]->e[0];
tex[0][1] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[0]]->e[1];
tex[0][2] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[0]]->e[2];
vert[0][0] = (float)data.vertex_list[data.face_list[i]->vertex_index[0]]->e[0];
vert[0][1] = (float)data.vertex_list[data.face_list[i]->vertex_index[0]]->e[1];
vert[0][2] = (float)data.vertex_list[data.face_list[i]->vertex_index[0]]->e[2];
// second vert
normal[1][0] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[1]]->e[0];
normal[1][1] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[1]]->e[1];
normal[1][2] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[1]]->e[2];
tex[1][0] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[1]]->e[0];
tex[1][1] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[1]]->e[1];
tex[1][2] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[1]]->e[2];
vert[1][0] = (float)data.vertex_list[data.face_list[i]->vertex_index[1]]->e[0];
vert[1][1] = (float)data.vertex_list[data.face_list[i]->vertex_index[1]]->e[1];
vert[1][2] = (float)data.vertex_list[data.face_list[i]->vertex_index[1]]->e[2];
// third vert
normal[2][0] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[2]]->e[0];
normal[2][1] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[2]]->e[1];
normal[2][2] = (float)data.vertex_normal_list[data.face_list[i]->normal_index[2]]->e[2];
tex[2][0] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[2]]->e[0];
tex[2][1] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[2]]->e[1];
tex[2][2] = (float)data.vertex_texture_list[data.face_list[i]->texture_index[2]]->e[2];
vert[2][0] = (float)data.vertex_list[data.face_list[i]->vertex_index[2]]->e[0];
vert[2][1] = (float)data.vertex_list[data.face_list[i]->vertex_index[2]]->e[1];
vert[2][2] = (float)data.vertex_list[data.face_list[i]->vertex_index[2]]->e[2];
for (j = 0; j < 3; j++)
{
glNormal3f (normal[j][0], normal[j][1], normal[j][2]);
glTexCoord3f (tex[j][0], tex[j][1], tex[j][2]);
glVertex3f (vert[j][0], vert[j][1], vert[j][2]);
}
}
glEnd ();
glEndList ();
EDIT:
I've tried things like:
glTranslatef (0, 0, 5);
glCallList (mesh);
glTranslatef (0, 0, 0);
but they don't work either :(
EDIT:
#datenwolf
Here is the code I use to draw it:
Draw_Begin ();
Mdl_Draw (list, 0.0f, 0.0f, 0.0f);
Draw_End ();
This
gluPerspective (45.0, (float)1024/(float)768, -9999, 9999);
is wrong. In a perspective projection both the near and the far plane distance must be of the same sign, i.e. both positive or both negative. Also the absolute value of the near plane must be smaller than the absolute value of the far plane. And the near plane distance must be nonzero. In mathematical notation:
sgn(near) = sgn(far) ∧ 0 < |near| < |far|
Usually both near and far are chosen positive. Also, as a rule of thumb, the near clipping plane should be chosen as far away as possible. The far plane can be placed at infinity (exploiting some of the properties of homogeneous matrices), but it is usually placed as close as possible to max out depth buffer resolution.
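For example, a projection setup consistent with those rules could look like this (the exact plane distances are placeholders to be tuned to the scene):

glMatrixMode (GL_PROJECTION);
glLoadIdentity ();
// Both distances positive and nonzero, with near < far.
gluPerspective (45.0, (float)1024/(float)768, 1.0, 9999.0);
glMatrixMode (GL_MODELVIEW);
glLoadIdentity ();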

Strange square lighting artefacts in OpenGL

I have a program that generates a heightmap and then displays it as a mesh with OpenGL. When I try to add lighting, it ends up with weird square shapes covering the mesh. They are more noticeable in some areas than others, but are always there.
I was using a quad mesh, but nothing changed after switching to a triangle mesh. I've used at least three different methods to calculate the vertex normals, all with the same effect. I was doing the lighting manually with shaders, but nothing changes when using the built-in OpenGL lighting system.
My latest normal-generating code (faces is an array of indices into verts, the vertex array):
int i;
for (i = 0; i < NINDEX; i += 3) {
vec v[3];
v[0] = verts[faces[i + 0]];
v[1] = verts[faces[i + 1]];
v[2] = verts[faces[i + 2]];
vec v1 = vec_sub(v[1], v[0]);
vec v2 = vec_sub(v[2], v[0]);
vec n = vec_norm(vec_cross(v2, v1));
norms[faces[i + 0]] = vec_add(norms[faces[i + 0]], n);
norms[faces[i + 1]] = vec_add(norms[faces[i + 1]], n);
norms[faces[i + 2]] = vec_add(norms[faces[i + 2]], n);
}
for (i = 0; i < NVERTS; i++) {
norms[i] = vec_norm(norms[i]);
}
That isn't the only code I've used, though, so I doubt it is the cause of the problem.
I draw the mesh with:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, verts);
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, 0, norms);
glDrawElements(GL_TRIANGLES, NINDEX, GL_UNSIGNED_SHORT, faces);
And I'm not currently using any shaders.
What could be causing this?
EDIT: A more comprehensive set of screenshots:
Wireframe
Flat shading, OpenGL lighting
Smooth shading, OpenGL lighting
Lighting done in shader
For the last one, the shader code is
Vertex:
varying vec3 lightvec, normal;
void main() {
vec3 lightpos = vec3(0, 0, 100);
vec3 v = vec3(gl_ModelViewMatrix * gl_Vertex);
normal = gl_NormalMatrix * gl_Normal;
lightvec = normalize(lightpos - v);
gl_Position = ftransform();
}
Fragment:
varying vec3 lightvec, normal;
void main(void) {
float l = dot(lightvec, normal);
gl_FragColor = vec4(l, l, l, 1);
}
You need to either normalize the normal in the fragment shader, like so:
varying vec3 lightvec, normal;
void main(void) {
    vec3 normalNormed = normalize(normal);
    float l = dot(lightvec, normalNormed);
    gl_FragColor = vec4(l, l, l, 1);
}
This can be expensive though. What will also work in this case, with directional lights, is to use vertex lighting. So calculate the light value in the vertex shader
varying float lightIntensity;
void main() {
    vec3 lightpos = vec3(0, 0, 100);
    vec3 v = vec3(gl_ModelViewMatrix * gl_Vertex);
    vec3 normal = gl_NormalMatrix * gl_Normal;
    vec3 lightvec = normalize(lightpos - v);
    lightIntensity = dot(normal, lightvec);
    gl_Position = ftransform();
}
and use it in the fragment shader,
varying float lightIntensity;
void main(void) {
    float l = lightIntensity;
    gl_FragColor = vec4(l, l, l, 1);
}
I hope this fixes it, let me know if it doesn't.
EDIT: Here's a small diagram that explains what is most likely happening.
EDIT2:
If that doesn't help, add more triangles. Interpolate the values of your heightmap and add some vertices in between.
Alternatively, try changing your tessellation scheme. For example, a mesh of equilateral triangles like so could make the artifacts less prominent.
You'll have to do some interpolation on your heightmap.
Otherwise I have no idea.. Good luck!
I don't have a definitive answer for the non-shader versions, but I wanted to add that if you're doing per-pixel lighting in your fragment shader, you should probably be normalizing the normal and lightvec inside the fragment shader.
If you don't do this, they may not be unit length (a linear interpolation between two normalized vectors is not necessarily normalized). This could explain some of the artifacts you see in the shader version, as the magnitude of the dot product would vary as a function of the distance from the vertices, which kind of looks like what you're seeing.
EDIT: Another thought, are you doing any non-uniform scaling (different x,y,z) of the mesh when rendering the non-shader version? If you scale it, then you need to either modify the normals by the inverse scale factor, or set glEnable(GL_NORMALIZE). See here for more:
http://www.lighthouse3d.com/tutorials/glsl-tutorial/normalization-issues/
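A minimal sketch of that fix, assuming the mesh from the question is drawn with some non-uniform scale:

// Have OpenGL renormalize normals after the modelview transform.
glEnable(GL_NORMALIZE);
glScalef(1.0f, 2.0f, 1.0f); // hypothetical non-uniform scale
glDrawElements(GL_TRIANGLES, NINDEX, GL_UNSIGNED_SHORT, faces);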
