GLSL shader going black when I try to sample a texture

I render a triangle strip this way, and with basic bypass shaders all is working fine:
EDIT:
I added TexCoords and modified the shaders, but I keep getting the same result: my 3D objects are going black!
UPDATED CODE:
// Draw all the prisms
glBegin(GL_TRIANGLE_STRIP);
for (i = 0; i < num_elems; i++) {
    for (j = 0; j < num_vertices; j++) {
        glNormal3fv((GLfloat *)(a + j*2));
        glTexCoord2f(0.0f, 0.0f);
        glVertex3fv((GLfloat *)(a + j*2 + 1));
        glTexCoord2f(1.0f, 0.0f);
        glNormal3fv((GLfloat *)(b + j*2));
        glTexCoord2f(1.0f, 1.0f);
        glVertex3fv((GLfloat *)(b + j*2 + 1));
    }
    glNormal3fv((GLfloat *)(a));
    glTexCoord2f(0.0f, 1.0f);
    glVertex3fv((GLfloat *)(a + 1));
    glNormal3fv((GLfloat *)(b));
    glTexCoord2f(0.0f, 0.0f);
    glVertex3fv((GLfloat *)(b + 1));
    a += face_size;
    b += face_size;
}
glEnd();
And I am trying to attach a texture to my shaders, but I can't figure out how to pass the texture.
I create and add the texture to my program this way. The texture data is verified; it is an array declared as unsigned char data[imageSize]:
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glActiveTexture(GL_TEXTURE0); // Texture unit 0
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0,GL_BGR, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
This is what I read in other posts with the same issue, and I added it to my code after compiling my shaders and generating my program without errors.
Tutorials tend to gloss over this information (how you tell your shader the name and location of your bound texture).
GLuint t1Location = glGetUniformLocation(programID, "tex1");
glUniform1i(t1Location, 0);
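For context, this is the full sequence I understand should happen at draw time (a minimal sketch using the names from my code above; the uniform has to be set while the program is active):
glUseProgram(programID);                    // uniforms apply to the active program
glActiveTexture(GL_TEXTURE0);               // select texture unit 0
glBindTexture(GL_TEXTURE_2D, textureID);    // attach the texture to that unit
GLuint t1Location = glGetUniformLocation(programID, "tex1");
glUniform1i(t1Location, 0);                 // tell the sampler to read unit 0
// ... draw calls ...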
And here are my shaders, UPDATED CODE:
#define GLSL(version, shader) "#version " #version "\n" #shader

const char* vert = GLSL
(
    110,
    varying vec4 position;
    varying vec3 normal;
    varying out vec4 texCoord;
    varying vec2 coord;
    void main()
    {
        position = gl_ModelViewMatrix * gl_Vertex;
        normal = normalize(gl_NormalMatrix * gl_Normal.xyz);
        gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
        coord = vec2(gl_MultiTexCoord0);
    }
);

const char* frag = GLSL
(
    110,
    uniform sampler2D tex1;
    varying vec4 position;
    varying vec3 normal;
    varying vec2 coord;
    void main()
    {
        gl_FragColor = texture2D(tex1, coord);
    }
);
EDIT2:
I am setting up GL this way (maybe something is conflicting with my texture shader, but I don't think so!):
/* set up depth-buffering */
glEnable(GL_DEPTH_TEST);
glEnable(GL_POLYGON_SMOOTH);
glHint(GL_POLYGON_SMOOTH_HINT, GL_FASTEST);
/* set up lights */
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);
glShadeModel(GL_SMOOTH);
GLfloat lightpos[] = { 3.0, 0.0, 1.0, 0.0 };
GLfloat lightcolor[] = { 0.5, 0.5, 0.5, 1.0 };
GLfloat ambcolor[] = { 0.5, 0.5, 0.5, 1.0 };
glLightModelfv(GL_LIGHT_MODEL_AMBIENT,ambcolor);
glEnable(GL_LIGHTING);
glColorMaterial(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE);
glEnable (GL_COLOR_MATERIAL);
glLightfv (GL_LIGHT0,GL_POSITION,lightpos);
glLightfv (GL_LIGHT0,GL_AMBIENT,ambcolor);
glLightfv (GL_LIGHT0,GL_DIFFUSE,lightcolor);
glLightfv (GL_LIGHT0,GL_SPECULAR,lightcolor);
glLightf (GL_LIGHT0,GL_CONSTANT_ATTENUATION,0.2);
glLightf (GL_LIGHT0,GL_LINEAR_ATTENUATION,0.0);
glLightf (GL_LIGHT0,GL_QUADRATIC_ATTENUATION,1.0);
glEnable (GL_LIGHT0);
glEnable(GL_TEXTURE_2D);
Replacing gl_FragColor with a flat color works fine.
I suspect it is related to the coord parameter, but I have tried everything I found and nothing works for me.

The internal texture format GL_BGR is not valid. GL_BGR is valid for the format of the source texture data, but the internal format has to be GL_RGB.
See glTexImage2D.
Adapt your code like this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
See the Khronos reference page GLAPI/glTexImage2D which says:
To define texture images, call glTexImage2D. The arguments describe the parameters of the texture image, such as height, width, width of the border, level-of-detail number (see glTexParameter), and number of color components provided. The last three arguments describe how the image is represented in memory.
format determines the composition of each element in data. It can assume one of these symbolic values:
GL_BGR:
Each element is an RGB triple. The GL converts it to floating point and assembles it into an RGBA element by attaching 1 for alpha. Each component is clamped to the range [0,1].
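Putting it together, a minimal corrected upload (a sketch reusing the variables from the question) would be:
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// internal format GL_RGB; the source data stays in GL_BGR order
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
The non-mipmap GL_NEAREST min filter also keeps the texture complete without mipmap levels.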

Related

Writing to a GL_R8 texture

I am having some trouble writing to a texture (GL_R8 Format) attached to an FBO while reading from another texture attached to the same FBO (GL_RGB32F). I believe the problem is with the output type in my fragment shader.
Texture Initialization:
glGenFramebuffers(1, &rBuffer->fbo);
glBindFramebuffer(GL_FRAMEBUFFER, rBuffer->fbo);
glGenTextures(RayBuffer_TextureType_NUMTEXTURES, rBuffer->textures);
...
glBindTexture(GL_TEXTURE_2D, rBuffer->textures[RayBuffer_TextureType_SHADOW]);
glTexImage2D
(
GL_TEXTURE_2D,
0,
GL_R8,
textureWidth,
textureHeight,
0,
GL_RED,
GL_UNSIGNED_BYTE,
NULL
);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D
(
GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW,
GL_TEXTURE_2D,
rBuffer->textures[RayBuffer_TextureType_SHADOW],
0
);
Binding:
glBindFramebuffer(GL_FRAMEBUFFER, rBuffer->fbo);
glDrawBuffer(GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW);
glActiveTexture(GL_TEXTURE0 + RayBuffer_TextureType_POSITION);
glBindTexture(GL_TEXTURE_2D, rBuffer->textures[RayBuffer_TextureType_POSITION]);
Geometry Shader:
#version 330

layout(triangles) in;
layout(triangle_strip, max_vertices = 4) out;

uniform sampler2D positionTexture;
uniform vec3 lightDirection;
uniform vec3 rightDirection;
uniform vec3 upDirection;
uniform vec2 screenSize;

void main()
{
    gl_Position = vec4(1.0f, 1.0f, 0.0f, 1.0f);
    EmitVertex();
    gl_Position = vec4(-1.0f, 1.0f, 0.0f, 1.0f);
    EmitVertex();
    gl_Position = vec4(1.0f, -1.0f, 0.0f, 1.0f);
    EmitVertex();
    gl_Position = vec4(-1.0f, -1.0f, 0.0f, 1.0f);
    EmitVertex();
    EndPrimitive();
}
Fragment Shader:
#version 330

layout (location = 3) out float out_shadow;

void main()
{
    out_shadow = 1.0f;
}
Blitting the texture to the screen:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, members->rBuffer->fbo);
glReadBuffer(GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW);
glBlitFramebuffer
(
0, 0, eBuffer->windowWidth, eBuffer->windowHeight,
0, 0, eBuffer->windowWidth, eBuffer->windowHeight,
GL_COLOR_BUFFER_BIT, GL_LINEAR
);
It is safe to assume that RayBuffer_TextureType_SHADOW is 3. Furthermore, it should be noted that I have stripped all complexity out of the geometry shader to find the origin of the problem. The code produces a completely black screen where I was expecting a completely red one.
I believe the problem is with the way you bind your output buffer. The critical line is here:
layout (location = 3) out float out_shadow;
You seem to assume that the value 3 is needed to match the index of the color attachment of the FBO you render to:
glFramebufferTexture2D
(
GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW,
with a value of 3 for RayBuffer_TextureType_SHADOW.
This is not how the association of fragment shader output to FBO attachment works. The value you specify with the layout(location=...) qualifier is called the color number in most parts of the spec. For example on page 190 of the OpenGL 3.3 spec, while describing glBindFragDataLocationIndexed(), it talks about:
The binding of a user-defined varying out variable to a fragment color number [..]
and on the next page (emphasis added by me):
When a program is linked, any varying out variables without a binding specified either through BindFragDataLocationIndexed or BindFragDataLocation, or explicitly set within the shader text will automatically be bound to fragment colors and indices by the GL.
Now, these "color numbers" match up with the index of the draw buffers you specified. From the description of glDrawBuffer() on page 210 of the same document:
defines the set of color buffers to which fragment color zero is written.
So with your call:
glDrawBuffer(GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW);
you specify that color 0 produced by the fragment shader is written to attachment 3 of your FBO.
What this all means is that you need to specify color number 0 for the output of the fragment shader:
layout (location = 0) out float out_shadow;
Color numbers larger than 0 are only useful if you produce more than one output from your fragment shader. In that case, the location values specify the index of the color buffer within the list passed to glDrawBuffers() that the output is written to.
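To illustrate with a hypothetical two-output example (names invented here), the location values index into the list passed to glDrawBuffers(), not the attachment enumerants:
// fragment shader: color numbers 0 and 1
layout (location = 0) out vec4 out_color;   // written to bufs[0]
layout (location = 1) out float out_shadow; // written to bufs[1]

// application code
GLenum bufs[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT3 };
glDrawBuffers(2, bufs); // color 0 -> attachment 0, color 1 -> attachment 3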

Cannot show texture in OpenGL 4.3 using VertexArrayObject

I've been knocking my head against this code for two days. There seems to be no error of any sort when creating the buffers or textures, but the texture doesn't show.
Here is my code for the texture load:
struct image2d texImage = loadBMPImage(filePath);
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &(result.external->texID));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, result.external->texID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, texImage.width, texImage.height, 0, GL_RGB, GL_UNSIGNED_BYTE, texImage.pixels);
free(texImage.pixels);
The image2d structure is this one
struct image2d {
    unsigned int width, height;
    unsigned char* pixels;
};
Yes, I'm enabling GL_TEXTURE_2D via glEnable()
Then my mesh is drawn with this code:
void MeshDraw(Mesh m, int renderType)
{
    glBindVertexArray(m.external->vao);
    glBindBuffer(GL_ARRAY_BUFFER, m.external->vbo);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, VERTEX_SIZE*4, 0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, VERTEX_SIZE*4, (void*)12);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m.external->ibo);
    glDrawElements(GL_TRIANGLES, m.external->sizeFc * 3, GL_UNSIGNED_SHORT, 0);
    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    return;
}
And finally here is my vertex shader
#version 430 core

layout (location = 0) in vec3 position;
layout (location = 1) in vec2 inTexCoord;

uniform mat4 transform;

out vec2 texCoord;
out vec3 outPos;

void main(void)
{
    outPos = position;
    gl_Position = transform * vec4(position, 1.0);
    texCoord = inTexCoord;
}
And here is my fragment shader
#version 430 core

out vec4 drawColor;
in vec2 texCoord;
in vec3 outPos;

uniform sampler2D sampler;

void main(void)
{
    drawColor = texture(sampler, texCoord);
    //drawColor = vec4(clamp(outPos, 0.0, 1.0), 1.0);
}
If you need to look at the whole project, I'm posting it here.
I'd appreciate any kind of help :)
Additional code (which is also in download if anyone wants to see it)
void initOpenGL()
{
    printf("OpenGL version: %s\n", glGetString(GL_VERSION));
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glFrontFace(GL_CW);
    glCullFace(GL_BACK);
    glEnable(GL_CULL_FACE);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_FRAMEBUFFER_SRGB);
    return;
}
This is called right after making the context and initializing glew.
void RenderGame(Game g)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    ShaderBind(g.external->sh);
    setUniformmat4(g.external->sh, "transform", TransformGetProjectedTransformation(g.external->transf));
    TextureBind(g.external->texture);
    MeshDraw(g.external->msh, GL_TRIANGLES);
    glFlush();
    glfwSwapBuffers(WindowGetHandler(g.external->window));
    return;
}
And this is my render method.
Your texture is not mipmap-complete, but you are still using the default GL_NEAREST_MIPMAP_LINEAR minification filter, so sampling the texture will fail.
You try to set it to GL_NEAREST, but this sequence of operations is wrong:
glGenTextures(1, &(result.external->texID));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, result.external->texID);
In the GL, the texture sampler state is part of the texture object itself (there are also separate sampler objects available nowadays which override that state, but you don't seem to use them either), and glTexParameteri() affects the texture object bound at the time of the call. I don't know if some texture is bound at that time, or none at all - but certainly the new texture is not, so it will stick with the initial default of GL_NEAREST_MIPMAP_LINEAR...
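A sketch of the fixed ordering, binding first so the parameter calls actually affect the new texture:
glGenTextures(1, &(result.external->texID));
glBindTexture(GL_TEXTURE_2D, result.external->texID); // bind BEFORE setting parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, texImage.width, texImage.height, 0, GL_RGB, GL_UNSIGNED_BYTE, texImage.pixels);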
SOLUTION: It seems that @peppe was right all along. Just to be meticulous, I set the sampler to 0 with the setUniform call and it worked. The reason it didn't work as expected before was that the function that loads the bitmap file was wrong. Now it works like a charm :) Thank you guys!

Can't spot the issue with my GLSL/OpenGL code

I wrote a little program to display a 32-bit float texture on a simple quad. When displaying the quad, the texture color is always black. I experimented with a lot of things, but I couldn't make it work, and I'm really at a loss as to what the problem is.
The code of creating the OpenGL texture goes like this
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, textureData);
Using the debugger, there's no error in any of these calls. I also examined the textureData pointer, and got the expected results (in my simplified program, it is just a gradient texture).
This is the vertex shader code in GLSL:
#version 400

in vec4 vertexPosition;
out vec2 uv;

void main() {
    gl_Position = vertexPosition;
    uv.x = (vertexPosition.x + 1.0) / 2;
    uv.y = (vertexPosition.y + 1.0) / 2;
}
It's kind of a simple generation of the UV coordinates without taking them as vertex attributes. The corresponding vertex buffer object is really simple:
GLfloat vertices[4][4] = {
    { -1.0,  1.0, 0.0, 1.0 },
    { -1.0, -1.0, 0.0, 1.0 },
    {  1.0,  1.0, 0.0, 1.0 },
    {  1.0, -1.0, 0.0, 1.0 },
};
I've tested the solution, and it displays the quad covering the entire window as I wanted. Displaying the UV coordinates as colors in the fragment shader reproduces the gradient I expected. Now here's the fragment shader:
#version 400

uniform sampler2D myTex;
in vec2 uv;
out vec4 fragColor;

void main() {
    fragColor = texture(myTex, uv);
    // fragColor += vec4(uv.x, uv.y, 0, 1);
}
The commented-out line displays the UV coordinates as color for debugging purposes. What am I doing wrong here? I just can't see why the texture() call returns 0 when the texture seems completely right and the UV coordinates are also proper. I'm linking the full code in case there's something else I'm doing wrong: gl-view.c
EDIT: This is how I set up the myTex sampler:
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureID);
glUniform1i(glGetUniformLocation(shaderProgram, "myTex"), 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
EDIT: Cleared up the vertex shader code.
I've found the issue: I didn't set any MAG or MIN filter on the texture. Setting the MIN filter to GL_NEAREST solved the problem.
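In code, either of these after binding the texture makes it complete (the glGenerateMipmap route is a sketch that assumes a GL 3.0+ context, which #version 400 implies):
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // no mipmaps required
// or keep the default GL_NEAREST_MIPMAP_LINEAR filter and actually supply mipmaps:
glGenerateMipmap(GL_TEXTURE_2D);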

GLSL Texture Mapping Results in a Solid Color

I'm trying to write some basic shaders to map a ppm file to my shapes. Unfortunately, instead of a nice multicoloured texture (I'm using a stone brick pattern), I get a solid shade of dark purple.
Here's my code:
Init:
printf("Using %d: Texture shading\n", shaderType);
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textName);
int w, h;
texture = glmReadPPM("brick.ppm", &w, &h);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
printf("W%dH%d\n", w, h);
glTexImage2D(GL_TEXTURE_2D, 0, 3, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textName);
programID = LoadShaders("text.vert", "text.frag");
Render:
glClearColor( 0.6f, 0.85f, 1.0f, 1.0f );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glMatrixMode (GL_MODELVIEW);
glLoadIdentity();
/*Unrelated code here*/
glUseProgram(programID);
varloc = glGetUniformLocation(programID,"texture1");
glUniform1i(varloc, textName);
glLightfv(GL_LIGHT0, GL_SPOT_CUTOFF, &cutOff);
gluLookAt (posx, posy, zoom,
lookx,looky,0,
0,1,0);
glRotatef(anglex,0.0f,1.0f,0.0f);
glRotatef(angley,1.0f,0.0f,0.0f);
renderTriangles(); //Renders mountains from a list using intermediate mode
// Yes, I know it's deprecated
glutSwapBuffers();
glui->sync_live();
glUseProgram(0);
Vertex Shader:
varying vec2 uv;

void main() {
    uv = vec2(gl_MultiTexCoord0.st);
    gl_Position = ftransform();
}
Fragment Shader:
uniform sampler2D texture1;
varying vec2 uv;

void main() {
    gl_FragColor = texture2D(texture1, uv);
}
Does anyone see any problems here? I can't seem to figure it out.
I tried with a basic white and red 2x2 texture, but again I got one colour. It was a light red.
If you're getting a single colour for the whole object, there might be something wrong with the texture coordinates. I would try looking at them and see if they're correct. You can do that by modifying your fragment shader like this:
gl_FragColor = vec4(uv, 0.0, 1.0);
If your whole image is still rendered using one colour, there is something wrong with the way you're sending texture coordinates across. You're using some deprecated functionality (immediate mode, gl_MultiTexCoord0), maybe it's not working together as you would expect:
"Keep in mind that for GLSL 1.30, you should define your own vertex attribute." http://www.opengl.org/wiki/GLSL_:_common_mistakes
It looks like you are binding the texture after all of the other texture calls. You should put the call to glBindTexture right after the call to glGenTextures, because you have to bind a texture before you can upload an image into it or set its parameters.
The other problem is that instead of setting the uniform variable for your sampler to textName with glUniform1i(varloc, textName), you should set it to 0, because that variable represents the texture unit index, and you used glActiveTexture(GL_TEXTURE0).
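A sketch of the reordered init code, reusing the variables from the question (I've also swapped the legacy internal format 3 for GL_RGB):
glGenTextures(1, &textName);
glBindTexture(GL_TEXTURE_2D, textName); // bind before configuring or uploading
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, texture);
And in the render code:
glUniform1i(varloc, 0); // the texture unit index, not the texture name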

OpenGL Lighting Failing when Scaling

I have to read a 3D object from an ASE file. This object turns out to be too big for the world I have to create, so I must scale it down.
With its original size, it is properly lighted up.
However, once I scale it down, it becomes oversaturated.
The world is centered around (0, 0, 0) and it is 100 meters long (y axis) and 50 meters wide (x axis), my upVector is (0, 0, 1). There are two lights, light0 in (20, 35, 750) and light1 in (-20, -35, 750).
Relevant parts of the code:
void init(void){
    glClearColor(0.827, 0.925, 0.949, 0.0);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_COLOR_MATERIAL);
    glColorMaterial(GL_FRONT, GL_DIFFUSE);
    glEnable(GL_LIGHT0);
    glEnable(GL_LIGHT1);
    glEnable(GL_LIGHTING);
    glShadeModel(GL_SMOOTH);
    GLfloat difusa[] = { 1.0f, 1.0f, 1.0f, 1.0f }; // white light
    glLightfv(GL_LIGHT0, GL_DIFFUSE, difusa);
    glLightfv(GL_LIGHT1, GL_DIFFUSE, difusa);
    loadObjectFromFile("objeto.ASE");
}
void display(void) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(eyeX, eyeY, eyeZ, atX, atY, atZ, 0.0, 0.0, 1.0);
    GLfloat posicion0[] = { 20.0f, 35.0f, 750.0f, 1.0f };
    glLightfv(GL_LIGHT0, GL_POSITION, posicion0);
    GLfloat posicion1[] = { -20.0f, -35.0f, 750.0f, 1.0f };
    glLightfv(GL_LIGHT1, GL_POSITION, posicion1);
    glColor3f(0.749, 0.918, 0.278);
    glPushMatrix();
    glTranslatef(0.0, 0.0, 1.5);
    // Here comes the problem
    glScalef(0.08, 0.08, 0.08);
    glBegin(GL_TRIANGLES);
    for (int i = 0; i < numFaces; i++) {
        glNormal3d(faces3D[i].n.nx, faces3D[i].n.ny, faces3D[i].n.nz);
        glVertex3d(vertex[faces3D[i].s.A].x, vertex[faces3D[i].s.A].y, vertex[faces3D[i].s.A].z);
        glVertex3d(vertex[faces3D[i].s.B].x, vertex[faces3D[i].s.B].y, vertex[faces3D[i].s.B].z);
        glVertex3d(vertex[faces3D[i].s.C].x, vertex[faces3D[i].s.C].y, vertex[faces3D[i].s.C].z);
    }
    glEnd();
    glPopMatrix();
    glutSwapBuffers();
}
Why does lighting fail when the object is scaled down?
The problem you're running into is that scaling the modelview matrix also influences the "normal matrix" that normals are transformed with. The "normal matrix" is the transpose of the inverse of the modelview matrix, so by scaling down the modelview matrix you're scaling up the normal matrix (because of the inversion step used to obtain it).
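For example, for a uniform scale the modelview matrix is M = s*I, and the normal matrix is transpose(inverse(M)) = (1/s)*I; with the glScalef(0.08, 0.08, 0.08) above, every transformed normal ends up 12.5 times too long, which is why the lighting oversaturates.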
Because of that, the transformed normals must be rescaled, or normalized, if the scale of the modelview matrix is not unitary. In fixed-function OpenGL there are two methods to do this: normal normalization (sounds funny, I know) and normal rescaling. You can enable one or the other with:
glEnable(GL_NORMALIZE);
// or
glEnable(GL_RESCALE_NORMAL);
In a shader you'd simply normalize the transformed normal:
#version ...

uniform mat3 mat_normal;
in vec3 vertex_normal;

void main()
{
    ...
    vec3 view_normal = normalize(mat_normal * vertex_normal);
    ...
}
Depending on the settings of GL_NORMALIZE and GL_RESCALE_NORMAL, your normals can be transformed by the OpenGL pipeline.
Start with glEnable(GL_NORMALIZE) and see if that solves your problem.
