Object not rendering after adding transformations - C

I'm adding transformations to my C OpenGL program, using CGLM as my maths library. The program compiles with no warnings or errors; however, when I run it, I simply get a window coloured with my clear colour. The following is my program's main loop:
// Initialize variables for framerate counting
double lastTime = glfwGetTime();
int frameCount = 0;

// Program loop
while (!glfwWindowShouldClose(window)) {
    // Calculate framerate
    double thisTime = glfwGetTime();
    frameCount++;
    // If a second has passed.
    if (thisTime - lastTime >= 1.0) {
        printf("%i FPS\n", frameCount);
        frameCount = 0;
        lastTime = thisTime;
    }

    processInput(window);

    // Clear the window
    glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);

    // Bind textures on texture units
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, texture2);

    // Create transformations
    mat4 transform = {{1.0f}};
    glm_translate(transform, (vec3){0.5f, -0.5f, 0.0f});
    glm_rotate(transform, (float)glfwGetTime(), (vec3){0.0f, 0.0f, 1.0f});
    printf("%i\n", transform);

    // Get matrix's uniform location and set matrix
    shaderUse(myShaderPtr);
    GLint transformLoc = glGetUniformLocation(myShaderPtr->shaderID, "transform");
    printf("%i\n", transformLoc);
    glUniformMatrix4fv(transformLoc, 1, GL_FALSE, *transform);

    glBindVertexArray(VAO);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);

    glfwSwapBuffers(window); // Swap the front and back buffers
    glfwPollEvents(); // Check for events (mouse movement, mouse click, keyboard press, keyboard release etc.)
}
The program is up on GitHub here if you'd like to check out the full code.
The output of this program is just a blank window filled with the clear colour.
However, the intended output is a spinning box with my profile picture on it.

mat4 transform = {{1.0f}}; does not do what you expect. C doesn't have constructors like C++. In C++ GLM, the glm::mat4(1.0f) constructor initializes the matrix to the identity matrix; in C, {{1.0f}} merely sets the first element to 1.0 and zero-initializes the rest. You have to use glm_mat4_identity to initialize the matrix with the identity matrix:
mat4 transform;
glm_mat4_identity(transform);
glm_rotate(transform, (float)glfwGetTime(), (vec3){0.0f, 0.0f, 1.0f});
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, (float*)transform);
Additionally, you need to add an orthographic projection matrix that compensates for the aspect ratio of the viewport:
float aspect = (float)width / (float)height;
mat4 projection;
glm_ortho(-aspect, aspect, -1.0f, 1.0f, -1.0f, 1.0f, projection);

mat4 transform;
glm_mat4_identity(transform);
glm_rotate(transform, (float)glfwGetTime(), (vec3){0.0f, 0.0f, 1.0f});

mat4 mvp;
glm_mat4_mul(projection, transform, mvp);

GLint transformLoc = glGetUniformLocation(myShaderPtr->shaderID, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, (float*)mvp);
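For reference, the vertex shader consuming the uploaded matrix would look something like this. This is a minimal sketch; the attribute names aPos and aTexCoord are assumptions, not taken from the question's code:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;

out vec2 TexCoord;

uniform mat4 transform;

void main()
{
    // Apply the combined projection * model transform uploaded from the C side
    gl_Position = transform * vec4(aPos, 1.0);
    TexCoord = aTexCoord;
}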

Related

How to use glTranslatef, glScalef, glRotatef in OpenGL

I just want something like this video: https://youtu.be/dGWtdYlryQQ
It shows how to use glTranslate, glRotate, and gluOrtho2D in OpenGL, but it doesn't really guide me through anything.
In my case, I draw a diamond instead of a triangle, under these conditions:
when I press r or R on the keyboard, the diamond rotates clockwise
when I press t or T on the keyboard, the diamond moves to the right side
when I press + on the keyboard, the diamond gets bigger
Here is my code:
#include <stdlib.h>
#include <windows.h>
#include <GL/glut.h>
#include <iostream>
using namespace std;

float angle = 0;
float t, s = 0.5, m = 0;

void myinit(void) {
    glClearColor(1.0, 1.0, 1.0, 0.0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0);
}

void keyboard(unsigned char key, int x, int y) {
    if (key == 27) {
        exit(0);
    } else if (key == 82 || key == 114) {
        angle -= 0.1;
        glRotatef(angle, 0, 0, 1);
        glutPostRedisplay();
    } else if (key == 84 || key == 116) {
        t += 0.01;
        glTranslatef(t, 0, 0);
        glutPostRedisplay();
    } else if (key == 43) {
        s += 0.01;
        // m -= 0.1;
        // glTranslatef(m, m, 0.0);
        glScalef(s, s, 0);
        glutPostRedisplay();
    }
    (void)(x);
    (void)(y);
}

void hut(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    glBegin(GL_TRIANGLE_FAN);
    glVertex3f(0.5, 0.4, 0.0);
    glVertex3f(0.42, 0.5, 0.0); // GREEN
    glVertex3f(0.44, 0.5, 0.0);
    glColor3f(1.5, 1.0, 0.0);
    glVertex3f(0.46, 0.5, 0.0);
    glColor3f(0.25, 0.0, 0.0);
    glVertex3f(0.57, 0.5, 0.0);
    glEnd();
    glFlush();
    glColor3f(1.5, 1.0, 0.0);
    glBegin(GL_TRIANGLE_FAN);
    glVertex3f(0.44, 0.55, 0.0);
    glVertex3f(0.42, 0.5, 0.0);
    glVertex3f(0.46, 0.5, 0.0);
    glColor3f(0.25, 0.0, 0.0);
    glVertex3f(0.48, 0.55, 0.0);
    glEnd();
    glColor3f(1.5, 1.0, 0.0);
    glBegin(GL_TRIANGLE_FAN);
    glVertex3f(0.48, 0.55, 0.0);
    glVertex3f(0.46, 0.5, 0.0);
    glVertex3f(0.50, 0.5, 0.0);
    glColor3f(0.25, 0.0, 0.0);
    glVertex3f(0.52, 0.55, 0.0);
    glEnd();
    glColor3f(1.5, 1.0, 0.0);
    glBegin(GL_TRIANGLE_FAN);
    glVertex3f(0.52, 0.55, 0.0);
    glVertex3f(0.50, 0.5, 0.0);
    glVertex3f(0.54, 0.5, 0.0);
    glColor3f(0.25, 0.0, 0.0);
    glVertex3f(0.56, 0.55, 0.0);
    glEnd();
    glColor3f(1.5, 1.0, 0.0);
    glBegin(GL_TRIANGLE_FAN);
    glVertex3f(0.56, 0.55, 0.0);
    glVertex3f(0.54, 0.5, 0.0);
    glVertex3f(0.57, 0.5, 0.0);
    glEnd();
    glFlush();
}

int main(int argc, char** argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowPosition(100, 100);
    glutInitWindowSize(640, 480);
    glutCreateWindow("Polygon with viewport");
    myinit();
    glutDisplayFunc(hut);
    glutKeyboardFunc(keyboard);
    glutMainLoop();
}
And here is my output: https://drive.google.com/file/d/14HHRiCbOHK9ZSZtDOqSl4GP4aSy7UQLh/view?usp=sharing
It's not similar to the video: https://youtu.be/dGWtdYlryQQ
The operations on the matrix stack are based on one another. The reference system of each operation is the current transformation.
See the documentation of glTranslate:
glTranslate produces a translation by x y z . The current matrix (see glMatrixMode) is multiplied by this translation matrix, with the product replacing the current matrix, [...]
and see the documentation of glRotate:
glRotate produces a rotation of angle degrees around the vector x y z . The current matrix (see glMatrixMode) is multiplied by a rotation matrix with the product replacing the current matrix.
This means that glRotate does a rotation around the origin of the current local system.
While glRotatef followed by glTranslatef translates along the already-rotated axes, carrying the object around the origin, glTranslatef followed by glRotatef moves the object first and then rotates it about its new local origin, so it spins in place.
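In code, the difference looks like this (a minimal sketch; angle and r are placeholder values):
// rotate first: the translation happens along the rotated axes,
// so the object orbits around the origin
glRotatef(angle, 0.0f, 0.0f, 1.0f);
glTranslatef(r, 0.0f, 0.0f);

// translate first: the object is moved out to (r, 0) and then
// rotated about its own local origin, i.e. it spins in place
glTranslatef(r, 0.0f, 0.0f);
glRotatef(angle, 0.0f, 0.0f, 1.0f);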
Since your object is displaced, you have to translate it so that the rotation point is placed at the origin:
glTranslatef(-0.5f, -0.5f, 0.0f);
Then you can rotate it:
glRotatef(angle, 0.0f, 0.0f, 1.0f);
And move it back:
glTranslatef(0.5f, 0.5f, 0.0f);
Note, on the fixed-function pipeline matrix stack you have to "push" these operations in reverse order. Further, you should use the GL_MODELVIEW matrix stack. (See glMatrixMode.)
Remove all the matrix operations from the function keyboard and add the following to the function hut:
void hut(void)
{
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(t, 0.0f, 0.0f);
    glTranslatef(0.5f, 0.5f, 0.0f);
    glRotatef(angle, 0.0f, 0.0f, 1.0f);
    glScalef(s, s, 0.0f);
    glTranslatef(-0.5f, -0.5f, 0.0f);
    .....
Further, your object gets distorted by the aspect ratio of the view. This can be fixed by taking the aspect ratio into account when setting up the projection matrix:
float w = 640.0f;
float h = 480.0f;
glOrtho(0.0, w/h, 0.0, 1.0, -1.0, 1.0);
glRotate rotates about the origin (0, 0). Given your projection matrix (set with glOrtho), the origin is initially at the lower-left corner of your screen, unless you use glTranslate. Your diamond is not centered at the origin but positioned some distance away from it. What you need to do is change the vertex values in your hut() function so the diamond is centered at (0, 0). Then use glTranslate to move the render origin (and thus also the diamond) to where you want it, then use glRotate, as sketched below.
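A minimal sketch of that approach (assuming the diamond's vertices are rewritten to be centered at the origin; cx, cy and drawDiamondCenteredAtOrigin are hypothetical placeholders):
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(cx, cy, 0.0f);          // move the render origin to where the diamond should appear
glRotatef(angle, 0.0f, 0.0f, 1.0f);  // the rotation now happens about the diamond's own center
drawDiamondCenteredAtOrigin();       // hypothetical helper emitting vertices around (0, 0)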

GLSL Shader going black when I try to sample a texture

I render a triangle strip this way, and with basic pass-through shaders everything works fine:
EDIT:
I added TexCoords and modified the shaders, but I keep getting the same result: my 3D objects are going black!
UPDATED CODE:
// Draw all the prisms
glBegin(GL_TRIANGLE_STRIP);
for (i = 0; i < num_elems; i++) {
    for (j = 0; j < num_vertices; j++) {
        glNormal3fv((GLfloat *)(a + j * 2));
        glTexCoord2f(0.0f, 0.0f);
        glVertex3fv((GLfloat *)(a + j * 2 + 1));
        glTexCoord2f(1.0f, 0.0f);
        glNormal3fv((GLfloat *)(b + j * 2));
        glTexCoord2f(1.0f, 1.0f);
        glVertex3fv((GLfloat *)(b + j * 2 + 1));
    }
    glNormal3fv((GLfloat *)(a));
    glTexCoord2f(0.0f, 1.0f);
    glVertex3fv((GLfloat *)(a + 1));
    glNormal3fv((GLfloat *)(b));
    glTexCoord2f(0.0f, 0.0f);
    glVertex3fv((GLfloat *)(b + 1));
    a += face_size;
    b += face_size;
}
glEnd();
And I am trying to attach a texture to my shaders, but I can't figure out how to pass the texture.
I create and add the texture to my program this way (the texture data is a verified array of the form unsigned char data[imageSize]):
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glActiveTexture(GL_TEXTURE0); // Texture unit 0
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0,GL_BGR, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
This is what I read in other posts with the same issue, and I added it to my code after compiling my shaders and generating my program without errors. Tutorials tend to gloss over this information (how you tell your shader the name and location of your bound texture).
GLuint t1Location = glGetUniformLocation(programID, "tex1");
glUniform1i(t1Location, 0);
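Note that glUniform1i only takes effect on the program currently bound with glUseProgram, so the call order matters; a minimal sketch of the intended order (assuming programID is the linked program):
glUseProgram(programID);                                      // uniform calls apply to the program in use
GLint t1Location = glGetUniformLocation(programID, "tex1");
glUniform1i(t1Location, 0);                                   // sampler "tex1" reads from texture unit 0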
And my shaders (UPDATED CODE):
#define GLSL(version, shader) "#version " #version "\n" #shader

const char* vert = GLSL
(
    110,
    varying vec4 position;
    varying vec3 normal;
    varying out vec4 texCoord;
    varying vec2 coord;

    void main()
    {
        position = gl_ModelViewMatrix * gl_Vertex;
        normal = normalize( gl_NormalMatrix * gl_Normal.xyz );
        gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
        coord = vec2(gl_MultiTexCoord0);
    }
);

const char* frag = GLSL
(
    110,
    uniform sampler2D tex1;
    varying vec4 position;
    varying vec3 normal;
    varying vec2 coord;

    void main()
    {
        gl_FragColor = texture2D(tex1, coord);
    }
);
EDIT2:
I am setting up GL this way (maybe something is conflicting with my texture shader, but I don't think so!):
/* set up depth-buffering */
glEnable(GL_DEPTH_TEST);
glEnable(GL_POLYGON_SMOOTH);
glHint(GL_POLYGON_SMOOTH_HINT, GL_FASTEST);
/* set up lights */
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);
glShadeModel(GL_SMOOTH);
GLfloat lightpos[] = { 3.0, 0.0, 1.0, 0.0 };
GLfloat lightcolor[] = { 0.5, 0.5, 0.5, 1.0 };
GLfloat ambcolor[] = { 0.5, 0.5, 0.5, 1.0 };
glLightModelfv(GL_LIGHT_MODEL_AMBIENT,ambcolor);
glEnable(GL_LIGHTING);
glColorMaterial(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE);
glEnable (GL_COLOR_MATERIAL);
glLightfv (GL_LIGHT0,GL_POSITION,lightpos);
glLightfv (GL_LIGHT0,GL_AMBIENT,ambcolor);
glLightfv (GL_LIGHT0,GL_DIFFUSE,lightcolor);
glLightfv (GL_LIGHT0,GL_SPECULAR,lightcolor);
glLightf (GL_LIGHT0,GL_CONSTANT_ATTENUATION,0.2);
glLightf (GL_LIGHT0,GL_LINEAR_ATTENUATION,0.0);
glLightf (GL_LIGHT0,GL_QUADRATIC_ATTENUATION,1.0);
glEnable (GL_LIGHT0);
glEnable(GL_TEXTURE_2D);
Replacing gl_FragColor with a flat colour works fine. I suspect it is related to the coord parameter, but I have tried everything I've found and nothing works for me.
The internal texture format GL_BGR is not valid. GL_BGR is a valid value for the format of the source texture, but the internal format has to be GL_RGB.
See glTexImage2D.
Adapt your code like this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
See the Khronos reference page GLAPI/glTexImage2D which says:
To define texture images, call glTexImage2D. The arguments describe the parameters of the texture image, such as height, width, width of the border, level-of-detail number (see glTexParameter), and number of color components provided. The last three arguments describe how the image is represented in memory.
format​ determines the composition of each element in data​. It can assume one of these symbolic values:
GL_BGR:
Each element is an RGB triple. The GL converts it to floating point and assembles it into an RGBA element by attaching 1 for alpha. Each component is clamped to the range [0,1].

Writing to a GL_R8 texture

I am having some trouble writing to a texture (GL_R8 format) attached to an FBO while reading from another texture attached to the same FBO (GL_RGB32F). I believe the problem is with the output type in my fragment shader.
Texture Initialization:
glGenFramebuffers(1, &rBuffer->fbo);
glBindFramebuffer(GL_FRAMEBUFFER, rBuffer->fbo);
glGenTextures(RayBuffer_TextureType_NUMTEXTURES, rBuffer->textures);
...
glBindTexture(GL_TEXTURE_2D, rBuffer->textures[RayBuffer_TextureType_SHADOW]);
glTexImage2D
(
    GL_TEXTURE_2D,
    0,
    GL_R8,
    textureWidth,
    textureHeight,
    0,
    GL_RED,
    GL_UNSIGNED_BYTE,
    NULL
);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D
(
    GL_FRAMEBUFFER,
    GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW,
    GL_TEXTURE_2D,
    rBuffer->textures[RayBuffer_TextureType_SHADOW],
    0
);
Binding:
glBindFramebuffer(GL_FRAMEBUFFER, rBuffer->fbo);
glDrawBuffer(GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW);
glActiveTexture(GL_TEXTURE0 + RayBuffer_TextureType_POSITION);
glBindTexture(GL_TEXTURE_2D, rBuffer->textures[RayBuffer_TextureType_POSITION]);
Geometry Shader:
#version 330

layout(triangles) in;
layout(triangle_strip, max_vertices = 4) out;

uniform sampler2D positionTexture;
uniform vec3 lightDirection;
uniform vec3 rightDirection;
uniform vec3 upDirection;
uniform vec2 screenSize;

void main()
{
    gl_Position = vec4(1.0f, 1.0f, 0.0f, 1.0f);
    EmitVertex();
    gl_Position = vec4(-1.0f, 1.0f, 0.0f, 1.0f);
    EmitVertex();
    gl_Position = vec4(1.0f, -1.0f, 0.0f, 1.0f);
    EmitVertex();
    gl_Position = vec4(-1.0f, -1.0f, 0.0f, 1.0f);
    EmitVertex();
    EndPrimitive();
}
Fragment Shader:
#version 330

layout (location = 3) out float out_shadow;

void main()
{
    out_shadow = 1.0f;
}
Blitting the texture to the screen:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, members->rBuffer->fbo);
glReadBuffer(GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW);
glBlitFramebuffer
(
    0, 0, eBuffer->windowWidth, eBuffer->windowHeight,
    0, 0, eBuffer->windowWidth, eBuffer->windowHeight,
    GL_COLOR_BUFFER_BIT, GL_LINEAR
);
It is safe to assume that RayBuffer_TextureType_SHADOW is 3. Furthermore, it should be noted that I have stripped all complexity out of the geometry shader to try to find the origin of the problem. The code produces a completely black screen, while I was expecting a completely red screen.
I believe the problem is with the way you bind your output buffer. The critical line is here:
layout (location = 3) out float out_shadow;
You seem to assume that the value 3 is needed to match the index of the color attachment of the FBO you render to:
glFramebufferTexture2D
(
GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW,
with a value of 3 for RayBuffer_TextureType_SHADOW.
This is not how the association of fragment shader output to FBO attachment works. The value you specify with the layout(location=...) qualifier is called the color number in most parts of the spec. For example on page 190 of the OpenGL 3.3 spec, while describing glBindFragDataLocationIndexed(), it talks about:
The binding of a user-defined varying out variable to a fragment color number [..]
and on the next page (emphasis added by me):
When a program is linked, any varying out variables without a binding specified either through BindFragDataLocationIndexed or BindFragDataLocation, or explicitly set within the shader text will automatically be bound to fragment colors and indices by the GL.
Now, these "color numbers" match up with the index of the draw buffers you specified. From the description of glDrawBuffer() on page 210 of the same document:
defines the set of color buffers to which fragment color zero is written.
So with your call:
glDrawBuffer(GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW);
you specify that color 0 produced by the fragment shader is written to attachment 3 of your FBO.
What this all means is that you need to specify color number 0 for the output of the fragment shader:
layout (location = 0) out float out_shadow;
Color numbers larger than 0 are only useful if you produce more than one output from your fragment shader. In that case, the location values specify the index of the color buffer within the list passed to glDrawBuffers() that the output is written to.
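If you really wanted to keep location = 3 in the fragment shader, you would instead have to pass glDrawBuffers() a list in which index 3 names your attachment. A minimal sketch under that assumption:
GLenum bufs[4] = { GL_NONE, GL_NONE, GL_NONE,
                   GL_COLOR_ATTACHMENT0 + RayBuffer_TextureType_SHADOW };
glDrawBuffers(4, bufs); // color number 3 now maps to attachment 3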

OpenGL - Rotating moon around sun without it spinning?

I'm working on a graphics model of the Moon rotating around the Earth. Right now, the Moon spins on its y axis while rotating around the Earth. How can I prevent the Moon from spinning but still allow it to orbit? Here's the code:
Edit:
Added an animation video to demonstrate problem:
http://www.youtube.com/watch?v=ltGV4pXD5Cs
void DrawInhabitants(GLint nShadow)
{
    static GLfloat yRot = 0.0f; // Rotation angle for animation

    if (nShadow == 0)
    {
        yRot += 0.2f;
    }

    // Draw the randomly located spheres
    glBindTexture(GL_TEXTURE_2D, textureObjects[MOON_TEXTURE]);
    glPushMatrix();
    glTranslatef(0.0f, 0.1f, -2.5f);

    glPushMatrix();
    glRotatef(-yRot * 2.0f, 0.0f, 1.0f, 0.0f);
    glTranslatef(1.0f, 0.0f, 0.0f);
    gltDrawSphere(0.1f, 21, 11);
    glPopMatrix();

    if (nShadow == 0)
    {
        // Torus alone will be specular
        glMaterialfv(GL_FRONT, GL_SPECULAR, fBrightLight);
    }

    glRotatef(-yRot, 0.0f, 1.0f, 0.0f);
    glBindTexture(GL_TEXTURE_2D, textureObjects[EARTH_TEXTURE]);
    gltDrawSphere(0.3f, 21, 11);
    glMaterialfv(GL_FRONT, GL_SPECULAR, fNoLight);
    glPopMatrix();
}
The problem is that you're rotating the coordinate system in order to place the moon in its desired relative position. This rotation is global, so it affects the orientation of the moon as well. You need to undo the rotation after translating, giving you a "translation sandwich":
rotate a
translate
rotate -a
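Applied to the code in the question, a sketch of the moon's push/pop block with the counter-rotation added (only this block changes):
glPushMatrix();
glRotatef(-yRot * 2.0f, 0.0f, 1.0f, 0.0f); // rotate a: carries the moon around its orbit
glTranslatef(1.0f, 0.0f, 0.0f);            // translate: move out to the orbit radius
glRotatef(yRot * 2.0f, 0.0f, 1.0f, 0.0f);  // rotate -a: undo the spin so the moon keeps its orientation
gltDrawSphere(0.1f, 21, 11);
glPopMatrix();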

OpenGL Lighting Failing when Scaling

I have to read a 3D object from an ASE file. This object turns out to be too big for the world I have to create; therefore, I must scale it down.
At its original size, it is properly lit. However, once I scale it down, the lighting becomes oversaturated.
The world is centered around (0, 0, 0); it is 100 meters long (y axis) and 50 meters wide (x axis), and my up vector is (0, 0, 1). There are two lights: light0 at (20, 35, 750) and light1 at (-20, -35, 750).
Relevant parts of the code:
void init(void) {
    glClearColor(0.827, 0.925, 0.949, 0.0);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_COLOR_MATERIAL);
    glColorMaterial(GL_FRONT, GL_DIFFUSE);
    glEnable(GL_LIGHT0);
    glEnable(GL_LIGHT1);
    glEnable(GL_LIGHTING);
    glShadeModel(GL_SMOOTH);

    GLfloat difusa[] = { 1.0f, 1.0f, 1.0f, 1.0f }; // white light
    glLightfv(GL_LIGHT0, GL_DIFFUSE, difusa);
    glLightfv(GL_LIGHT1, GL_DIFFUSE, difusa);

    loadObjectFromFile("objeto.ASE");
}

void display(void) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(eyeX, eyeY, eyeZ, atX, atY, atZ, 0.0, 0.0, 1.0);

    GLfloat posicion0[] = { 20.0f, 35.0f, 750.0f, 1.0f };
    glLightfv(GL_LIGHT0, GL_POSITION, posicion0);
    GLfloat posicion1[] = { -20.0f, -35.0f, 750.0f, 1.0f };
    glLightfv(GL_LIGHT1, GL_POSITION, posicion1);

    glColor3f(0.749, 0.918, 0.278);
    glPushMatrix();
    glTranslatef(0.0, 0.0, 1.5);
    // Here comes the problem
    glScalef(0.08, 0.08, 0.08);
    glBegin(GL_TRIANGLES);
    for (int i = 0; i < numFaces; i++) {
        glNormal3d(faces3D[i].n.nx, faces3D[i].n.ny, faces3D[i].n.nz);
        glVertex3d(vertex[faces3D[i].s.A].x, vertex[faces3D[i].s.A].y, vertex[faces3D[i].s.A].z);
        glVertex3d(vertex[faces3D[i].s.B].x, vertex[faces3D[i].s.B].y, vertex[faces3D[i].s.B].z);
        glVertex3d(vertex[faces3D[i].s.C].x, vertex[faces3D[i].s.C].y, vertex[faces3D[i].s.C].z);
    }
    glEnd();
    glPopMatrix();

    glutSwapBuffers();
}
Why does lighting fail when the object is scaled down?
The problem you're running into is that scaling the modelview matrix also influences the "normal matrix" that normals are transformed with. The normal matrix is the transpose of the inverse of the modelview matrix, so by scaling down the modelview matrix you're scaling up the normal matrix (because of the inversion step used to obtain it). Concretely, for a uniform scale s the normal matrix scales normals by 1/s; with your glScalef(0.08, 0.08, 0.08), the transformed normals come out 1/0.08 = 12.5 times too long, which oversaturates the lighting.
Because of that, the transformed normals must be rescaled or normalized whenever the scale of the modelview matrix is not unitary. In fixed-function OpenGL there are two methods to do this: normal normalization (sounds funny, I know) and normal rescaling. You can enable either with:
glEnable(GL_NORMALIZE);
glEnable(GL_RESCALE_NORMALS);
In a shader you'd simply normalize the transformed normal:
#version ...

uniform mat3 mat_normal;
in vec3 vertex_normal;

void main()
{
    ...
    vec3 view_normal = normalize( mat_normal * vertex_normal );
    ...
}
Depending on the settings of GL_NORMALIZE and GL_RESCALE_NORMALS, your normals are transformed accordingly by the OpenGL pipeline.
Start with glEnable(GL_NORMALIZE) and see if that solves your problem. (GL_RESCALE_NORMALS is cheaper, but it is only correct for uniform scaling.)
