I am using CUDA to generate an ABGR output image. The image is stored in a uchar4 array, where each element holds the color of one pixel. Conceptually the output is a 2D image, but it is allocated in CUDA as linear memory of interleaved bytes.
I know that CUDA can easily map this array to an OpenGL Vertex Buffer Object. My question is, assuming that I have the RGB value of every pixel in an image, along with the width and height of the image, how can I draw this image to screen using OpenGL?
I know that some kind of shader must be involved, but since my OpenGL knowledge is very limited, I have no idea how a shader can take the color of each pixel and map it to the correct screen pixels.
I know I should deepen my OpenGL knowledge, but this seems like a trivial task.
If there is an easy way to draw this image, I'd rather not spend much time learning OpenGL.
I finally figured out an easy way to do what I wanted. Unfortunately, I did not know about the sample on NVIDIA's website that Robert mentioned.
Long story short, the easiest way to draw the image was to define a Pixel Buffer Object in OpenGL, register the buffer with CUDA, and pass it as an output array of uchar4 to the CUDA kernel. Here is quick pseudocode, based on JOGL and JCUDA, showing the steps involved. Most of the code was adapted from the sample on NVIDIA's website:
1) Creating the OpenGL buffers
GL2 gl = drawable.getGL().getGL2();
int[] buffer = new int[1];
// Generate buffer
gl.glGenBuffers(1, IntBuffer.wrap(buffer));
glBuffer = buffer[0];
// Bind the generated buffer
gl.glBindBuffer(GL2.GL_ARRAY_BUFFER, glBuffer);
// Specify the size of the buffer (no data is pre-loaded in this buffer)
gl.glBufferData(GL2.GL_ARRAY_BUFFER, imageWidth * imageHeight * 4, (Buffer)null, GL2.GL_DYNAMIC_DRAW);
gl.glBindBuffer(GL2.GL_ARRAY_BUFFER, 0);
// The bufferResource is of type CUgraphicsResource and is defined as a class field
this.bufferResource = new CUgraphicsResource();
// Register buffer in CUDA
cuGraphicsGLRegisterBuffer(bufferResource, glBuffer, CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
2) Initialize the texture and set texture parameters
GL2 gl = drawable.getGL().getGL2();
int[] texture = new int[1];
gl.glGenTextures(1, IntBuffer.wrap(texture));
this.glTexture = texture[0];
gl.glBindTexture(GL2.GL_TEXTURE_2D, glTexture);
gl.glTexParameteri(GL2.GL_TEXTURE_2D, GL2.GL_TEXTURE_MIN_FILTER, GL2.GL_LINEAR);
gl.glTexParameteri(GL2.GL_TEXTURE_2D, GL2.GL_TEXTURE_MAG_FILTER, GL2.GL_LINEAR);
gl.glTexImage2D(GL2.GL_TEXTURE_2D, 0, GL2.GL_RGBA8, imageWidth, imageHeight, 0, GL2.GL_BGRA, GL2.GL_UNSIGNED_BYTE, (Buffer)null);
gl.glBindTexture(GL2.GL_TEXTURE_2D, 0);
3) Run the CUDA kernel and display the results in OpenGL's display loop.
this.runCuda(drawable);
GL2 gl = drawable.getGL().getGL2();
gl.glBindBuffer(GL2.GL_PIXEL_UNPACK_BUFFER, glBuffer);
gl.glBindTexture(GL2.GL_TEXTURE_2D, glTexture);
gl.glTexSubImage2D(GL2.GL_TEXTURE_2D, 0, 0, 0,
imageWidth, imageHeight,
GL2.GL_RGBA, GL2.GL_UNSIGNED_BYTE, 0); //The last argument must be ZERO! NOT NULL! :-)
gl.glBindBuffer(GL2.GL_PIXEL_PACK_BUFFER, 0);
gl.glBindBuffer(GL2.GL_PIXEL_UNPACK_BUFFER, 0);
gl.glBindTexture(GL2.GL_TEXTURE_2D, glTexture);
gl.glEnable(GL2.GL_TEXTURE_2D);
gl.glDisable(GL2.GL_DEPTH_TEST);
gl.glDisable(GL2.GL_LIGHTING);
gl.glTexEnvf(GL2.GL_TEXTURE_ENV, GL2.GL_TEXTURE_ENV_MODE, GL2.GL_REPLACE);
gl.glMatrixMode(GL2.GL_PROJECTION);
gl.glPushMatrix();
gl.glLoadIdentity();
gl.glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
gl.glMatrixMode(GL2.GL_MODELVIEW);
gl.glLoadIdentity();
gl.glViewport(0, 0, imageWidth, imageHeight);
gl.glBegin(GL2.GL_QUADS);
gl.glTexCoord2f(0.0f, 1.0f);
gl.glVertex2f(-1.0f, -1.0f);
gl.glTexCoord2f(1.0f, 1.0f);
gl.glVertex2f(1.0f, -1.0f);
gl.glTexCoord2f(1.0f, 0.0f);
gl.glVertex2f(1.0f, 1.0f);
gl.glTexCoord2f(0.0f, 0.0f);
gl.glVertex2f(-1.0f, 1.0f);
gl.glEnd();
gl.glMatrixMode(GL2.GL_PROJECTION);
gl.glPopMatrix();
gl.glDisable(GL2.GL_TEXTURE_2D);
3.5) The CUDA call:
public void runCuda(GLAutoDrawable drawable) {
devOutput = new CUdeviceptr();
// Map the OpenGL buffer to a resource and then obtain a CUDA pointer to that resource
cuGraphicsMapResources(1, new CUgraphicsResource[]{bufferResource}, null);
cuGraphicsResourceGetMappedPointer(devOutput, new long[1], bufferResource);
// Setup the kernel parameters making sure that the devOutput pointer is passed to the kernel
Pointer kernelParams = ... // parameter setup elided; it must include devOutput
int gridSize = (int) Math.ceil(imageWidth * imageHeight / (double)DESC_BLOCK_SIZE);
cuLaunchKernel(function,
gridSize, 1, 1,
DESC_BLOCK_SIZE, 1, 1,
0, null,
kernelParams, null);
cuCtxSynchronize();
// Unmap the buffer so that it can be used in OpenGL
cuGraphicsUnmapResources(1, new CUgraphicsResource[]{bufferResource}, null);
}
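For completeness, on the CUDA side the kernel just treats the mapped buffer as a flat uchar4 array with one element per pixel. The following is only a sketch with made-up names (fillImage, and a placeholder gradient); the byte order you write must match the format you later pass to glTexSubImage2D:
__global__ void fillImage(uchar4 *out, int width, int height)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x; // 1D launch, one thread per pixel
    if (idx >= width * height) return;
    int x = idx % width;
    int y = idx / width;
    // Placeholder gradient; write whatever your application actually computes.
    unsigned char r = (unsigned char)(255 * x / width);
    unsigned char g = (unsigned char)(255 * y / height);
    out[idx] = make_uchar4(r, g, 0, 255);
}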
PS: I thank Robert for providing the link to the sample. I also thank the people who downvoted my question without any useful feedback!
I'm quite new to font rendering and I'm trying to generate a signed distance field with FreeType so that it can be used in a fragment shader in OpenGL. Here is the code I tried:
error = FT_Load_Glyph(face, glyph_index, FT_LOAD_DEFAULT);
if (error)
{
// Handle error
}
error = FT_Render_Glyph(face->glyph, FT_RENDER_MODE_SDF);
if (error)
{
// Handle error
}
Maybe I completely misunderstand the idea of SDF, but my thought was that I could give FreeType a TTF file and, with FT_RENDER_MODE_SDF, it should produce a buffer of signed distances. But FT_Render_Glyph returns error 19, which happens to be "cannot render this glyph format".
SDF support was added to FreeType at the end of 2020, with a new module in the second half of 2021, so make sure you have a version more recent than that. For example, 2.6 is older than 2.12.0 (the newest at the time of writing).
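If you're not sure which version you're linking against, you can query it at runtime; this is just a quick sanity-check sketch:
#include <ft2build.h>
#include FT_FREETYPE_H
#include <cstdio>

int main()
{
    FT_Library lib;
    FT_Init_FreeType(&lib);
    FT_Int major, minor, patch;
    FT_Library_Version(lib, &major, &minor, &patch);
    std::printf("FreeType %d.%d.%d\n", major, minor, patch);
    FT_Done_FreeType(lib);
}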
With that out of the way, let's get started.
I'm assuming you've completed the font rendering tutorial from LearnOpenGL and you can successfully render text on the screen. You should have something like this (notice the new additions):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // Disable byte-alignment restriction
FT_GlyphSlot slot = face->glyph; // <-- This is new
for (unsigned char c = 0; c < 128; c++)
{
// Load character glyph
if (FT_Load_Char(face, c, FT_LOAD_RENDER))
{
// error message
continue;
}
FT_Render_Glyph(slot, FT_RENDER_MODE_SDF); // <-- And this is new
// Generate texture
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D( ... );
...
}
When rendering the text, you have to tell OpenGL not to write the fragments of the quads to the depth buffer, otherwise adjacent glyphs will overlap and start to flicker:
glDepthMask(GL_FALSE); // Don't write into the depth buffer
RenderText(pTextShader, text, 25.0f, 25.0f, 1.0f, glm::vec3(0.5, 0.8f, 0.2f));
glDepthMask(GL_TRUE); // Re-enable writing to the depth buffer
If you want to place the text as an object in your scene, in world-space, then in the vertex shader you can use:
gl_Position = uVp * uModel * vec4(vertex.xy, 0.0, 1.0); // uVp is "projection * view" on the CPU side
However, this is a bit outside the scope of your question; it just makes it easier to inspect the text from all angles by circling the camera around it. Make sure you call glDisable(GL_CULL_FACE) before drawing the glyphs, to disable backface culling, so they're visible from both sides - see the sketch below.
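Putting those state changes together, the draw call for world-space text might look like this (RenderText being the helper from the LearnOpenGL tutorial):
glDisable(GL_CULL_FACE);  // glyph quads are one-sided; keep them visible from behind
glDepthMask(GL_FALSE);    // as above: don't write glyph fragments to the depth buffer
RenderText(pTextShader, text, 25.0f, 25.0f, 1.0f, glm::vec3(0.5, 0.8f, 0.2f));
glDepthMask(GL_TRUE);
glEnable(GL_CULL_FACE);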
As for the fragment shader, I suggest you watch this video.
The bare minimum would be:
#version 330 core
in vec2 TexCoords;
out vec4 oFragColor;
uniform sampler2D uGlyphTexture;
uniform vec3 uTextColor;

void main()
{
    // In an SDF, 0.5 marks the glyph outline; discard everything outside it.
    float glyphShape = texture(uGlyphTexture, TexCoords).r;
    if (glyphShape < 0.5)
        discard;
    oFragColor = vec4(uTextColor, 1.0);
}
Result:
I think there's a pretty stark difference between them, wouldn't you say?
Have fun!
I am trying to crop an image loaded with the SOIL library, before using it as a texture.
So first, how can I load an image and then convert it to a texture?
And secondly, how can I modify (crop, etc.) the loaded image?
This is what I would like to do:
unsigned char * img = SOIL_load_image("img.png", &w, &h, &ch, SOIL_LOAD_RGBA);
// crop img ...
// cast it into GLuint texture ...
You can load a portion of your image by utilizing the glPixelStorei functionality:
// the location and size of the region to crop, in pixels:
int cropx = ..., cropy = ..., cropw = ..., croph = ...;
// tell OpenGL where to start reading the data:
glPixelStorei(GL_UNPACK_SKIP_PIXELS, cropx);
glPixelStorei(GL_UNPACK_SKIP_ROWS, cropy);
// tell OpenGL how many pixels are in a row of the full image:
glPixelStorei(GL_UNPACK_ROW_LENGTH, w);
// load the data to a previously created texture (format/type here describe the
// client data; the internal format was chosen when the texture was created)
glTextureSubImage2D(texture, 0, 0, 0, cropw, croph, GL_RGBA, GL_UNSIGNED_BYTE, img);
// reset the unpack state so later uploads aren't affected:
glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
Here's a diagram from the OpenGL spec that might help:
EDIT: If you're using older OpenGL (older than 4.5) then replace the glTextureSubImage2D call with:
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB8_ALPHA8, cropw, croph, 0, GL_RGBA, GL_UNSIGNED_BYTE, img);
Make sure to create and bind the texture prior to this call (same way you create textures normally).
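To tie this back to the original question, an end-to-end sketch (untested; loadCroppedTexture is a made-up helper name) could look like this:
#include <SOIL/SOIL.h>
#include <GL/gl.h>

GLuint loadCroppedTexture(const char* path, int cropx, int cropy, int cropw, int croph)
{
    int w, h, ch;
    unsigned char* img = SOIL_load_image(path, &w, &h, &ch, SOIL_LOAD_RGBA);
    if (!img) return 0;

    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    // Point the unpack state at the crop region within the full image:
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, cropx);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, cropy);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, w);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, cropw, croph, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, img);

    // Restore defaults so later uploads aren't affected:
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);

    SOIL_free_image_data(img);
    glBindTexture(GL_TEXTURE_2D, 0);
    return tex;
}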
I want to map an earth texture onto a sphere. My sphere is an icosphere built from many triangles (100+), and I found it confusing to set the UV coordinates for the whole sphere. I tried glTexGen and the result is quite close, but the texture is repeated 8 times over the sphere (see image). I cannot find a way to make it wrap the whole object exactly once. Here is the code where the sphere and texture are created.
glEnable(GL_TEXTURE_2D);
glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
glBegin(GL_TRIANGLES);
for (int i = 0; i < new_sphere->NumOfTrians; i++)
{
Triangle *draw_Trian = new_sphere->Trians+i;
glVertex3f(draw_Trian->pnts[0].coords[0], draw_Trian->pnts[0].coords[1], draw_Trian->pnts[0].coords[2]);
glVertex3f(draw_Trian->pnts[1].coords[0], draw_Trian->pnts[1].coords[1], draw_Trian->pnts[1].coords[2]);
glVertex3f(draw_Trian->pnts[2].coords[0], draw_Trian->pnts[2].coords[1], draw_Trian->pnts[2].coords[2]);
}
glDisable(GL_TEXTURE_2D);
free(new_sphere->Trians);
free(new_sphere);
glEnd();
You need to define how your texture is supposed to map to your triangles. This depends on the texture you're using. There are a multitude of ways to map the surface of a sphere with a texture (since no one mapping is free of singularities). It looks like you have a cylindrical projection texture there. So we will emit cylindrical UV coordinates.
I've tried to give you some code here, but it's assuming that
Your mesh is a unit sphere (i.e., centered at 0 and has radius 1)
pnts.coords is an array of floats
You want to use the second coordinate (coord[1]) as the 'up' direction (or the height in a cylindrical mapping)
Your code would look something like this. I've defined a new function for emitting cylindrical UVs, so you can put that wherever you like.
/* Map [(-1, -1, -1), (1, 1, 1)] into [(0, 0), (1, 1)] cylindrically */
inline void uvCylinder(float* coord) {
float angle = 0.5f * atan2(coord[2], coord[0]) / 3.14159f + 0.5f;
float height = 0.5f * coord[1] + 0.5f;
glTexCoord2f(angle, height);
}
glEnable(GL_TEXTURE_2D);
glBegin(GL_TRIANGLES);
for (int i = 0; i < new_sphere->NumOfTrians; i++) {
Triangle *t = new_sphere->Trians+i;
uvCylinder(t->pnts[0].coords);
glVertex3f(t->pnts[0].coords[0], t->pnts[0].coords[1], t->pnts[0].coords[2]);
uvCylinder(t->pnts[1].coords);
glVertex3f(t->pnts[1].coords[0], t->pnts[1].coords[1], t->pnts[1].coords[2]);
uvCylinder(t->pnts[2].coords);
glVertex3f(t->pnts[2].coords[0], t->pnts[2].coords[1], t->pnts[2].coords[2]);
}
glEnd();
glDisable(GL_TEXTURE_2D);
free(new_sphere->Trians);
free(new_sphere);
Note on Projections
The reason it's confusing to build UV coordinates for the whole sphere is that there isn't one 'correct' way to do it. Mathematically speaking, there's no such thing as a perfect 2D mapping of a sphere; hence why we have so many different types of projections. When you have a 2D image that's a texture for a spherical object, you need to know what type of projection that image was built for, so that you can emit the correct UV coordinates for that texture.
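For example, if your earth texture is an equirectangular (latitude/longitude) map rather than a cylindrical one, the latitude should go through asin instead of being mapped linearly. A sketch, under the same unit-sphere assumptions as above:
/* Map a point on the unit sphere to equirectangular UVs in [(0, 0), (1, 1)] */
inline void uvEquirect(float* coord) {
    float pi = 3.14159265f;
    float u = 0.5f * atan2(coord[2], coord[0]) / pi + 0.5f; // longitude -> [0, 1]
    float v = asin(coord[1]) / pi + 0.5f;                   // latitude  -> [0, 1]
    glTexCoord2f(u, v);
}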
Currently I'm working on a project that renders applications to a Framebuffer Object (FBO) first, and then renders them back using the FBO's color and depth texture attachments, in OpenGL ES 2.0.
Multiple applications now render fine with their color buffers, but when I try to use the information from the depth texture, it doesn't work.
I tried rendering the depth texture by sampling it with texture coordinates, and it is all white. People say the grayscale values may differ only slightly, i.e. even for near geometry the value is close to 1.0. So I modified my fragment shader as follows:
vec4 depth;
depth = texture2D(s_depth0, v_texCoord);
if(depth.r == 1.0)
gl_FragColor = vec4(1.0,0.0,0.0,1.0);
And, unsurprisingly, it's all red.
The application code:
void Draw ( ESContext *esContext )
{
UserData *userData = esContext->userData;
// Clear the color buffer
glClear ( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Draw a triangle
GLfloat vVertices[] = { 0.0f, 0.5f, 0.5f,
-0.5f, -0.5f,-0.5f,
0.5f, -0.5f,-0.5f };
// Set the viewport
glViewport ( 0, 0, esContext->width, esContext->height );
// Use the program object
glUseProgram ( userData->programObject );
// Load the vertex position
glVertexAttribPointer ( 0, 3, GL_FLOAT, GL_FALSE, 0, vVertices );
glEnableVertexAttribArray ( 0 );
glDrawArrays ( GL_TRIANGLES, 0, 3 );
eglSwapBuffers ( esContext->eglDisplay, esContext->eglSurface );
}
So what could be the problem, if the color buffer works fine while the depth buffer doesn't?
I finally solved it. The cause turned out to be that the client applications never enabled GL_DEPTH_TEST.
So if you hit the same problem, be sure to enable depth testing by calling glEnable(GL_DEPTH_TEST) during OpenGL ES initialization; it is disabled by default, presumably for performance reasons.
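A minimal sketch of that initialization (the depth function and clear value shown are just the defaults, made explicit):
// During GL ES initialization, before rendering into the FBO:
glEnable(GL_DEPTH_TEST); // without this, nothing is written to the depth attachment
glDepthFunc(GL_LESS);    // the default comparison, shown here for clarity
glClearDepthf(1.0f);     // the value written by glClear(GL_DEPTH_BUFFER_BIT)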
Thanks for all the advice and answers.
The depth texture needs to be linearized to be visible in the viewport, because depth is stored non-linearly (most of the precision sits near the near plane). Try this in the fragment shader, where nearClip and farClip must match the near/far planes of the projection that produced the depth:
uniform float farClip, nearClip;
float depth = texture2D(s_depth0, v_texCoord).x;
float depthLinear = (2.0 * nearClip) / (farClip + nearClip - depth * (farClip - nearClip));
I had a drawing function called DrawImage, but it's really confusing and only works with one specific form of the reshape function, so I have 2 questions:
How do I draw a texture in OpenGL? I just want a function that takes a texture, x, y, width, height and maybe an angle, and draws the texture according to those arguments. I want to draw it as a GL_QUAD as usual, but I'm not sure how to do that anymore .-. People say I should use SDL or SFML to do so; is that recommended? If it is, can you give me a simple function that loads a texture and one that draws it? I'm currently using SOIL to load textures.
the function is as here:
void DrawImage(GLuint texture, int xx, int yy, int ww, int hh, int angle)
{
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glLoadIdentity();
glTranslatef(xx,yy,0.0);
glRotatef(angle,0.0,0.0,1.0);
glTranslatef(-xx,-yy,0.0);
// Draw a textured quad
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(xx,yy);
glTexCoord2f(0, 1); glVertex2f(xx,yy + hh);
glTexCoord2f(1, 1); glVertex2f(xx + ww,yy + hh);
glTexCoord2f(1, 0); glVertex2f(xx + ww,yy);
glDisable(GL_TEXTURE_2D);
glPopMatrix();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glEnd();
}
Someone told me that you can't call glDisable, glPopMatrix or glMatrixMode between glBegin and glEnd. The problem is that the code won't work without them. Any idea how to make it work without them?
2. About glutReshapeFunc: the documentation says it takes a pointer to a function with 2 args, width and height. So far I've written a reshape function that takes void - any idea how to write one that takes a width and height and actually does what reshape needs to do?
And one minor question: how much better is C++ than C when it comes to GUIs like OpenGL? As far as I can see, only OOP matters, and I haven't run into any problem that OOP could solve and C couldn't (in OpenGL, I mean).
No need to answer all of the questions - question number 1 is the most important to me :P
Your DrawImage function looks pretty much just fine. Although, yes, you shouldn't be calling glMatrixMode etc. before glEnd, so remove them. I believe the issue is simply with setting up your projection matrix, and the added calls just happen to mask an issue that shouldn't be there in the first place. glutReshapeFunc is used to capture window resize events, so until you need it you don't have to use it.
SDL gives you a lot more control over events than GLUT, but takes a little longer to set up. GLFW is also a good alternative. I guess it's not that important to change unless you see a feature you need. These are libs for creating a GL context and doing some event handling; SOIL can be used with them all.
OpenGL is a graphics API and gives a common interface for doing hardware accelerated 3D graphics, not a GUI lib. There are GUI libs written for OpenGL though.
Yes, I believe many take OOP to the extreme. I like the idea of C++ as a better C, rather than something that completely restructures the way you code. Maybe just keep using C, but with a C++ compiler. Then when you see a feature you like, use it. Eventually you may find you're using lots of features, and you'll have a better appreciation for why they exist and when to use them, rather than blindly following coding practices. Just my opinion; this is all very subjective.
So, the projection matrix...
To draw stuff in 3D on a 2D screen you "project" the 3D points onto a plane. I'm sure you've seen images like this:
This allows you to define your own arbitrary 3D coordinate system. But for drawing in 2D it's natural to want to use pixel coordinates directly - after all, that's what your monitor displays. So you want a kind of bypass projection that doesn't do any perspective scaling and matches pixels in scale and aspect ratio.
The default projection (or "viewing volume") is an orthographic -1 to 1 cube. To change it:
glMatrixMode(GL_PROJECTION); //from now on all glOrtho, glTranslate etc affect projection
glOrtho(0, widthInPixels, 0, heightInPixels, -1, 1);
glMatrixMode(GL_MODELVIEW); //good to leave in edit-modelview mode
Call this anywhere, really, but since the only variables it depends on are the window width/height, it's normal to put it in some initialization code or, if you plan on resizing your window, in a resize event handler such as:
void reshape(int x, int y) {... do stuff with x/y ...}
...
glutReshapeFunc(reshape); //give glut the callback
This will make the lower left corner of the screen the origin and values passed to glVertex can now be in pixels.
A couple more things: instead of translating to (xx, yy) and back with glTranslatef(-xx,-yy,0.0), you could translate once and then draw the quad relative to the origin, starting with glVertex2f(0,0). Also, push/pop matrix calls should always be paired within a function, so the caller isn't expected to match them; a cleaned-up sketch follows.
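For example, here is one way the cleaned-up DrawImage could look - only a sketch, assuming the pixel-space projection above is already set and tex is a valid texture id:
void DrawImage(GLuint tex, float x, float y, float w, float h, float angle)
{
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, tex);
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();                     // paired with glPopMatrix below
    glTranslatef(x, y, 0.0f);           // rotate about the quad's lower-left corner
    glRotatef(angle, 0.0f, 0.0f, 1.0f);
    glBegin(GL_QUADS);                  // only vertices/texcoords between Begin/End
    glTexCoord2f(0, 0); glVertex2f(0, 0);
    glTexCoord2f(0, 1); glVertex2f(0, h);
    glTexCoord2f(1, 1); glVertex2f(w, h);
    glTexCoord2f(1, 0); glVertex2f(w, 0);
    glEnd();
    glPopMatrix();
    glDisable(GL_TEXTURE_2D);
}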
I'll finish with a full example:
#include <GL/glut.h>
#include <GL/gl.h>
#include <stdio.h>
int main(int argc, char** argv)
{
//create GL context
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(800, 600);
glutCreateWindow("windowname");
//create test checker image
unsigned char texDat[64];
for (int i = 0; i < 64; ++i)
texDat[i] = ((i + (i / 8)) % 2) * 128 + 127;
//upload to GPU texture
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, 8, 8, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, texDat);
glBindTexture(GL_TEXTURE_2D, 0);
//match projection to window resolution (could be in reshape callback)
glMatrixMode(GL_PROJECTION);
glOrtho(0, 800, 0, 600, -1, 1);
glMatrixMode(GL_MODELVIEW);
//clear and draw quad with texture (could be in display callback)
glClear(GL_COLOR_BUFFER_BIT);
glBindTexture(GL_TEXTURE_2D, tex);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2i(0, 0); glVertex2i(100, 100);
glTexCoord2i(0, 1); glVertex2i(100, 500);
glTexCoord2i(1, 1); glVertex2i(500, 500);
glTexCoord2i(1, 0); glVertex2i(500, 100);
glEnd();
glDisable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
glFlush(); //don't need this with GLUT_DOUBLE and glutSwapBuffers
getchar(); //pause so you can see what just happened
//system("pause"); //lowercase system() from <stdlib.h>; works on Windows
return 0;
}
If you're OK with using OpenGL 3.0 or higher, an easier way to draw a texture is glBlitFramebuffer(). It doesn't support rotation; it only copies the texture to a rectangle within your framebuffer, scaling if necessary.
I haven't tested this code, but it would look something like this, with tex being your texture id:
GLuint readFboId = 0;
glGenFramebuffers(1, &readFboId);
glBindFramebuffer(GL_READ_FRAMEBUFFER, readFboId);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, tex, 0);
glBlitFramebuffer(0, 0, texWidth, texHeight,
0, 0, winWidth, winHeight,
GL_COLOR_BUFFER_BIT, GL_LINEAR);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &readFboId);
You can of course reuse the same FBO if you want to draw textures repeatedly. I only create/destroy it here to make the code self-contained.