How to draw a framebuffer object to the default framebuffer - c

This code is supposed to clear the background to yellow using a framebuffer object and renderbuffers, but all I get is a black background.
#include <SDL2/SDL.h>
#include <GL/glew.h>
int main( int argc, char** argv)
{
    SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 3);
    SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 3);
    SDL_GL_SetAttribute( SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
    SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1);
    SDL_GL_SetAttribute( SDL_GL_ACCELERATED_VISUAL, 1);
    SDL_Window* gWindow= SDL_CreateWindow( "Title",
        SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
        500, 500, SDL_WINDOW_OPENGL);
    SDL_GLContext gContext= SDL_GL_CreateContext( gWindow);
    glewExperimental= GL_TRUE;
    glewInit();
    GLuint fbo;
    glGenFramebuffers( 1, &fbo);
    glBindFramebuffer( GL_FRAMEBUFFER, fbo);
    GLuint color_rbr;
    glGenRenderbuffers(1, &color_rbr);
    glBindRenderbuffer( GL_RENDERBUFFER, color_rbr);
    glRenderbufferStorage( GL_RENDERBUFFER, GL_RGBA32UI, 500, 500);
    glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, color_rbr);
    GLuint depth_rbr;
    glGenRenderbuffers( 1, &depth_rbr);
    glBindRenderbuffer( GL_RENDERBUFFER, depth_rbr);
    glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 500, 500);
    glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depth_rbr);
    if( glCheckFramebufferStatus( GL_DRAW_FRAMEBUFFER)!= GL_FRAMEBUFFER_COMPLETE)
        return 1;
    if( glCheckFramebufferStatus( GL_READ_FRAMEBUFFER)!= GL_FRAMEBUFFER_COMPLETE)
        return 2;
    glViewport( 0, 0, 500, 500);
    glClearColor( 1, 1, 0, 0);
    SDL_GL_SetSwapInterval( 1);
    int quit= 0;
    SDL_Event event;
    glReadBuffer( GL_COLOR_ATTACHMENT0);
    while( !quit)
    {
        while( SDL_PollEvent( &event))
            if( event.type== SDL_QUIT)
                quit= 1;
        glBindFramebuffer( GL_DRAW_FRAMEBUFFER, fbo);
        glDrawBuffer( GL_COLOR_ATTACHMENT0);
        glClear( GL_COLOR_BUFFER_BIT);
        glBindFramebuffer( GL_DRAW_FRAMEBUFFER, 0);
        glDrawBuffer( GL_BACK);
        glBlitFramebuffer(
            0, 0, 500, 500,
            0, 0, 500, 500,
            GL_COLOR_BUFFER_BIT, GL_NEAREST);
        SDL_GL_SwapWindow( gWindow);
    }
    SDL_DestroyWindow( gWindow);
    return 0;
}
It first clears the framebuffer object with the specified color, then blits the framebuffer object to the default framebuffer. Is there something wrong in the code? I can't seem to find where exactly the problem is.

The glBlitFramebuffer operation fails because the read buffer contains unsigned integer values and the default framebuffer does not. This generates a GL_INVALID_OPERATION error.
The format of the read buffer is GL_RGBA32UI
glRenderbufferStorage( GL_RENDERBUFFER, GL_RGBA32UI, 500, 500);
while the format of the draw buffer (default framebuffer) is an unsigned normalized integer format (probably GL_RGBA8).
If you change the internal format to GL_RGBA16 or GL_RGBA32F, your code will work properly.
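For example, keeping everything else unchanged, requesting a normalized color format for the renderbuffer is enough; GL_RGBA8 matches what the default framebuffer most likely uses:
glRenderbufferStorage( GL_RENDERBUFFER, GL_RGBA8, 500, 500); // was GL_RGBA32UI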
Note that formats like GL_RGBA8 and GL_RGBA16 are normalized fixed-point formats and can store values in the range [0.0, 1.0].
Formats like GL_RGBA16UI and GL_RGBA32UI, on the other hand, are unsigned integer formats and store integer values in the corresponding range.
The specification says that unsigned integer values can only be blitted to unsigned integer buffers, so if the source format is a *UI format, the target format has to be a *UI format, too.
See OpenGL 4.6 API Compatibility Profile Specification - 18.3.2 Blitting Pixel Rectangles page 662:
void BlitFramebuffer( int srcX0, int srcY0, int srcX1, int srcY1,
int dstX0, int dstY0, int dstX1, int dstY1, bitfield mask, enum filter );
[...]
Errors
[...]
An INVALID_OPERATION error is generated if format conversions are not supported, which occurs under any of the following conditions:
The read buffer contains fixed-point or floating-point values and any draw buffer contains neither fixed-point nor floating-point values.
The read buffer contains unsigned integer values and any draw buffer does not contain unsigned integer values.
The read buffer contains signed integer values and any draw buffer does not contain signed integer values.
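One way to see this failure at runtime, instead of just getting a black window, is to poll glGetError right after the blit. A small sketch (printf is just illustrative and needs <stdio.h>):
glBlitFramebuffer( 0, 0, 500, 500, 0, 0, 500, 500, GL_COLOR_BUFFER_BIT, GL_NEAREST);
GLenum err = glGetError();
if( err == GL_INVALID_OPERATION)
    printf( "blit rejected: incompatible read/draw buffer formats\n");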

Related

Encode buffer captured by OpenGL in C

I am trying to use OpenGL to capture the back buffer of my computer's screen, and then H.264-encode the buffer using FFMPEG's libavcodec library. The issue I'm having is that I would like to encode the video in AV_PIX_FMT_YUV420P, but the back buffer capture function provided by OpenGL, glReadPixels(), only supports formats like GL_RGB. As you can see below, I try to use FFMPEG's sws_scale() function to convert from RGB to YUV, but the following code crashes at the sws_scale() line. Any ideas on how I can encode the OpenGL backbuffer?
// CAPTURE BACK BUFFER USING OPENGL
int width = 1280, height = 720;
BYTE* pixels = (BYTE *) malloc(sizeof(BYTE));
glReadPixels(0, 720, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
//CREATE FFMPEG VARIABLES
avcodec_register_all();
AVCodec *codec;
AVCodecContext *context;
struct SwsContext *sws;
AVPacket packet;
AVFrame *frame;
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
context = avcodec_alloc_context3(encoder->codec);
context->dct_algo = FF_DCT_FASTINT;
context->bit_rate = 400000;
context->width = width;
context->height = height;
context->time_base.num = 1;
context->time_base.den = 30;
context->gop_size = 1;
context->max_b_frames = 1;
context->pix_fmt = AV_PIX_FMT_YUV420P;
avcodec_open2(context, codec, NULL);
// CONVERT TO YUV AND ENCODE
int frame_size = avpicture_get_size(AV_PIX_FMT_YUV420P, out_width, out_height);
encoder->frame_buffer = malloc(frame_size);
avpicture_fill((AVPicture *) encoder->frame, (uint8_t *) encoder->frame_buffer, AV_PIX_FMT_YUV420P, out_width, out_height);
sws = sws_getContext(in_width, in_height, AV_PIX_FMT_RGB32, out_width, out_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, 0, 0, 0);
uint8_t *in_data[1] = {(uint8_t *) pixels};
int in_linesize[1] = {width * 4};
// PROGRAM CRASHES HERE
sws_scale(encoder->sws, in_data, in_linesize, 0, encoder->in_height, encoder->frame->data, encoder->frame->linesize);
av_free_packet(&packet);
av_init_packet(&packet);
int success;
avcodec_encode_video2(context, &packet, frame, &success);
Your pixels buffer is too small; you malloc only one BYTE instead of width*height*4 bytes:
BYTE* pixels = (BYTE *) malloc(width*height*4);
Your glReadPixels call is also incorrect:
Passing y=720 causes it to read outside the window. Remember that the OpenGL coordinate system has its y-axis pointing upwards, so the bottom row of the window is at y=0.
AV_PIX_FMT_RGB32 expects four bytes per pixel, whereas GL_RGB writes three bytes per pixel, therefore you need GL_RGBA or GL_BGRA.
Of the two I'm pretty sure that it should be GL_BGRA: AV_PIX_FMT_RGB32 treats pixels as 32-bit integers, therefore on little-endian blue comes first. OpenGL treats each channel as a byte, therefore it should be GL_BGRA to match.
To summarize try:
glReadPixels(0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, pixels);
Additionally, because the OpenGL y-axis points upwards while the ffmpeg y-axis points downwards, you may need to flip the image. It can be done with the following trick:
uint8_t *in_data[1] = {(uint8_t *) pixels + (height-1)*width*4}; // address of the last line
int in_linesize[1] = {- width * 4}; // negative stride
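Putting those corrections together, a sketch of the capture-and-convert path could look like this. It is not a complete encoder: frame is assumed to be an AVFrame whose data/linesize have already been set up (e.g. via avpicture_fill as in the question), and the codec context setup is unchanged.
int width = 1280, height = 720;
uint8_t *pixels = malloc((size_t) width * height * 4);    // 4 bytes per pixel for BGRA
glReadPixels(0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, pixels);

struct SwsContext *sws = sws_getContext(width, height, AV_PIX_FMT_RGB32,
                                        width, height, AV_PIX_FMT_YUV420P,
                                        SWS_FAST_BILINEAR, NULL, NULL, NULL);

// flip vertically while converting: start at the last row and use a negative stride
const uint8_t *in_data[1] = { pixels + (size_t) (height - 1) * width * 4 };
int in_linesize[1] = { -width * 4 };
sws_scale(sws, in_data, in_linesize, 0, height, frame->data, frame->linesize);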

SDL Texture created from memory is rendering only in black and white

I am trying to port a game from DDraw to SDL2.
The original program loads the images and blits them to a backbuffer then flips it to a primary one.
I am thinking that I could technically shortcut part of the process and just grab the backbuffer in memory, turn it into a texture, and blit that to the screen. This kind of works already; the only problem is that the screen is black and white.
Here is some code. The variable that holds the backbuffer is destmemarea:
if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
SDL_Log("Unable to initialize SDL: %s", SDL_GetError());
}
SDL_Window* window = NULL;
SDL_Texture *bitmapTex = NULL;
SDL_Surface *bitmapSurface = NULL;
SDL_Surface *MySurface = NULL;
SDL_DisplayMode DM;
SDL_GetCurrentDisplayMode(0, &DM);
auto Width = DM.w;
auto Height = DM.h;
window = SDL_CreateWindow("SDL Tutorial ", Width = DM.w - SCREEN_WIDTH, 32, SCREEN_WIDTH *4, SCREEN_HEIGHT, SDL_WINDOW_SHOWN);
if (window == NULL)
{
printf("Window could not be created! SDL_Error: %s\n", SDL_GetError());
}
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
int w, h;
SDL_GetRendererOutputSize(renderer, &w, &h);
SDL_Surface * image = SDL_CreateRGBSurfaceFrom( destmemarea, 640, 0, 32, 640, 0, 0, 0,0);
SDL_Texture * texture = SDL_CreateTextureFromSurface(renderer, image);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
SDL_Delay(10000);
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
}
Not sure if this helps, but this is what is being used for the DDraw surface, by the looks of it...
dd.dwWidth = 768;
dd.lPitch = 768;
dd.dwSize = 108;
dd.dwFlags = DDSD_PIXELFORMAT|DDSD_PITCH|DDSD_WIDTH|DDSD_HEIGHT|DDSD_CAPS;
dd.ddsCaps.dwCaps = DDSCAPS_SYSTEMMEMORY|DDSCAPS_OFFSCREENPLAIN;
dd.dwHeight = 656;
dd.ddpfPixelFormat.dwSize = 32;
So, I'm not 100% sure I understand what you are trying to do, but I have a few assumptions.
You said that you're porting your codebase from DDraw, so I assume that the backbuffer you are mentioning is an internal backbuffer that you are allocating, and in the rest of your application are doing your rendering to it.
If I am correct in this assumption, then your current approach is what you need to do, but you need to pass the correct parameters to SDL_CreateRGBSurfaceFrom:
width and height are... width and height in pixels
depth is the number of bits in a single pixel. This depends on the rest of your rendering code that writes to your memory buffer. If we assume you're doing standard RGBA, where each channel is 8 bits, it would be 32.
pitch is the size in bytes of a single row of your surface - it should be equal to width * (depth / 8).
the 4 masks, Rmask, Gmask, Bmask, and Amask, describe how the channels are laid out within each depth-sized pixel. Again, this depends on how you render to your memory buffer, and on the endianness of your target platform. From the documentation, these are 2 possible standard layouts:
#if SDL_BYTEORDER == SDL_BIG_ENDIAN
rmask = 0xff000000;
gmask = 0x00ff0000;
bmask = 0x0000ff00;
amask = 0x000000ff;
#else
rmask = 0x000000ff;
gmask = 0x0000ff00;
bmask = 0x00ff0000;
amask = 0xff000000;
#endif
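Putting the parameters together, a call might look like the following. This is just a sketch: the 640x480 size and 32-bit RGBA layout are assumptions for illustration, not necessarily what destmemarea actually contains.
SDL_Surface *image = SDL_CreateRGBSurfaceFrom(
    destmemarea,
    640, 480,            /* width, height in pixels */
    32,                  /* depth: bits per pixel   */
    640 * 4,             /* pitch: bytes per row    */
    rmask, gmask, bmask, amask);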
Be sure not to forget to free your surface by calling SDL_FreeSurface()
With all that said... I think you are approaching your problem from the wrong angle.
As I stated in my comment, SDL handles double buffering for you. Instead of having custom code that renders to a buffer in memory, then trying to create a surface from that memory, rendering it to SDL's backbuffer, and calling present... you should skip the middleman and draw directly to SDL's back buffer.
This is done through the various SDL render functions, of which RenderCopy is a member.
Your render loop should basically do 3 things:
Call SDL_RenderClear()
Loop over every object that you want to present to the screen, and use one of the SDL render functions - in the most common case of an image, that would be SDL_RenderCopy. This means that, throughout your codebase, you load your images, create an SDL_Surface and an SDL_Texture for each, keep those around, and on every frame call SDL_RenderCopy or SDL_RenderCopyEx.
Finally, you call SDL_RenderPresent exactly once per frame. This will swap the buffers, and present your image to screen.
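A minimal sketch of such a loop (renderer and texture are placeholders for whatever you created earlier, not the asker's exact variables):
int running = 1;
while (running) {
    SDL_Event e;
    while (SDL_PollEvent(&e))
        if (e.type == SDL_QUIT)
            running = 0;

    SDL_RenderClear(renderer);                      /* 1. clear the backbuffer           */
    SDL_RenderCopy(renderer, texture, NULL, NULL);  /* 2. draw each object (just one here) */
    SDL_RenderPresent(renderer);                    /* 3. present, exactly once per frame */
}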

When processing an image, it gets weird colors in GTK

Hello people from stackoverflow, what I want to do is to process an image with a transformation pixel by pixel to make it darker. The idea is really simple, I have to do something like this:
R *= factor
G *= factor
B *= factor
Where "factor" is a float number between 0 and 1, and R, G, B are the Rgb numbers for each pixel. To do so, I load an "RGB" file that has three numbers for each pixel from 0 to 255 to an array of char pointers.
char *imagen1, *imagen;
int resx, resy; //resolution
imagen1 = malloc....;
//here I load a normal image buffer to imagen1
float factor = 0.5f; // it can be any number between 0 an 1
for(unsigned int i=0; i< 3*resx*resy; i++)
imagen[i] = (char) (((int) imagen1[i])*factor);
gtk_init (&argc, &argv);
window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
g_signal_connect (window, "destroy", G_CALLBACK (gtk_main_quit), NULL);
pixbuf = gdk_pixbuf_new_from_data (buffer, GDK_COLORSPACE_RGB, FALSE, 8,
resx, resy, (resx)*3, NULL, NULL);
image = gtk_image_new_from_pixbuf (pixbuf);
gtk_container_add(GTK_CONTAINER (window), image);
pixbuf = gdk_pixbuf_new_from_data (imagen, GDK_COLORSPACE_RGB, FALSE, 8,
resx, resy, (resx)*3, NULL, NULL);
gtk_image_set_from_pixbuf(image, pixbuf);
Ignore if the GTK part is not properly written; it displays "imagen". If factor is 1 the image is displayed correctly, with real colors. The problem is that when I use a number between 0 and 1, the displayed image gets very weird colors, as if it were "saturated" or the color depth were lower. The further factor is from 1, the worse the image gets. I don't know why this happens; I thought GTK normalized the RGB values and that this reduced the color depth, but I tried adding some white (255, 255, 255) and black (0, 0, 0) pixels and the problem persists. I would like to know what I am doing wrong. Sorry for my English, and thank you in advance!
Your colour components are stored in plain char, and on most platforms char is signed, so any component value above 127 is read back as a negative number. ((int) imagen1[i]) * factor then scales that negative value, and the byte that ends up stored looks nothing like the darkened value you want. For example, 200 is read as -56, and -56 * 0.5 stored back into the buffer shows up as 228 instead of 100, which is exactly the kind of "saturated" colours you describe (white pixels even end up black).
Work with unsigned char instead, or cast each byte through unsigned char before the multiplication, so the components keep their 0 to 255 range.
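A minimal sketch of the corrected loop, keeping the question's buffer names and assuming imagen has been allocated to the same size as imagen1:
unsigned char *src = (unsigned char *) imagen1;
unsigned char *dst = (unsigned char *) imagen;
for (unsigned int i = 0; i < 3u * resx * resy; i++)
    dst[i] = (unsigned char) (src[i] * factor);   /* components stay in 0..255 */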

Advice on an algorithm for rendering a bitmap pixel by pixel

I've been working on a bitmap loader, with the main goal to do nothing more than parse the data properly and render it in OpenGL. I'm at the point where I need to draw the pixels on an x/y (i.e., pixel by pixel) basis (at least, this is what I think I need to do as far as rendering is concerned). I've already bound the texture object and called glTexImage2D(...).
Currently, what I'm having trouble with is the pixel by pixel algorithm.
As far as I understand it, bitmap (aka DIB) files store color data in what is known as the pixel array. Each row of pixels consists of x amount of bytes, with each pixel taking up a number of bytes corresponding to its depth: 4 (32 bits per pixel), 3 (24 bits per pixel), 2 (16 bits per pixel), or 1 (8 bits per pixel).
I think I need to loop through the pixels while at the same time calculating the right offset within the pixel array, which is relative to its pixel x/y coordinate. Is this true, though? If not, what should I do? I'm honestly slightly confused as to whether or not this approach is correct, despite doing what was directed to me in this question I asked some time ago.
I assume that going about it on a pixel by pixel basis is the right approach, mainly because rendering a quad with glVertex* and glTexCoord* produced nothing more than a grayed out rectangle (at the time I thought OpenGL would handle this by itself, hence attempting that in the first place).
I should also note that, while my question displays OpenGL 3.1 shaders, I moved to SDL 1.2 so I could just use immediate mode for the time being until I get the right algorithms implemented, and then switch back to modern GL.
The test image I'm parsing (not included here); its data output (pastebinned due to its very long length):
http://pastebin.com/6RVhAVRq
And The Code:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
    glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
    glBegin( GL_POINTS );
    {
        const unsigned width = data->img_data->width + ( unsigned int ) center[ VEC_X ];
        const unsigned height = data->img_data->height + ( unsigned int ) center[ VEC_Y ];
        const unsigned bytecount = GetByteCount( data->img_data->bpp );
        const unsigned char* pixels = data->img_data->pixels;
        unsigned color_offset = 0;
        unsigned x_pixel;
        for ( x_pixel = center[ VEC_X ]; x_pixel < width; ++x_pixel )
        {
            unsigned y_pixel;
            for ( y_pixel = center[ VEC_Y ]; y_pixel < height; ++y_pixel )
            {
            }
            const bool do_color_update = true; //<--- replace true with a condition which checks to see if the color needs to be updated.
            if ( do_color_update )
            {
                glColor3fv( pixels + color_offset );
            }
            color_offset += bytecount;
        }
    }
    glEnd();
    glBindTexture( GL_TEXTURE_2D, 0 );
}
You're completely missing the point of an OpenGL texture in your code. The texture holds the image for you, and the rasterizer does all the iteration over the pixel data for you. There is no need to write a slow pixel-pusher loop yourself.
As your code stands right now, that texture is completely bogus and does nothing. You could completely omit the calls to glBindTexture and it'd still work – or rather not, because you're not actually drawing anything; you only set the glColor state. To draw something you'd have to call glVertex.
So why not leverage the pixel-pushing performance of modern GPUs and actually use a texture? How about this:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
    if( 0 == data->texbuf_id ) {
        glGenTextures(1, &(data->texbuf_id));
        glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
        // there are a few more, but the defaults are ok
        // if you didn't change them no need for further unpack settings

        GLenum internal_format;
        GLenum format;
        GLenum type;
        switch(data->img_data->bpp) {
        case 8:
            // this could be a palette or grayscale
            internal_format = GL_LUMINANCE8;
            format = GL_LUMINANCE;
            type = GL_UNSIGNED_BYTE;
            break;

        case 15:
            internal_format = GL_RGB5;
            // 16 bit BMP pixels are X1R5G5B5 words, which match BGRA order
            // combined with the packed 1_5_5_5_REV type
            format = GL_BGRA;
            type = GL_UNSIGNED_SHORT_1_5_5_5_REV;
            break;

        case 16:
            internal_format = GL_RGB8;
            // 5-6-5 BMP pixels keep red in the high bits, matching GL_RGB
            // with the packed 5_6_5 type
            format = GL_RGB;
            type = GL_UNSIGNED_SHORT_5_6_5;
            break;

        case 24:
            internal_format = GL_RGB8;
            format = GL_BGR; // BMP files have BGR pixel order
            type = GL_UNSIGNED_BYTE;
            break;

        case 32:
            internal_format = GL_RGB8;
            format = GL_BGRA; // 32 bit BMP pixels are stored as B,G,R,X bytes
            type = GL_UNSIGNED_BYTE;
            break;
        }

        glTexImage2D( GL_TEXTURE_2D, 0, internal_format,
                      data->img_data->width, data->img_data->height, 0,
                      format, type, data->img_data->pixels );
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    } else {
        glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
    }

    static GLfloat verts[] = {
        0, 0,
        1, 0,
        1, 1,
        0, 1
    };

    // the following is to address texture image pixel centers
    // tex coordinates 0 and 1 are not on pixel centers!
    int const tex_width  = data->img_data->width;
    int const tex_height = data->img_data->height;
    float const s0 = 1. / (2.*tex_width);
    float const s1 = ( 2.*(tex_width-1) + 1.) / (2.*tex_width);
    float const t0 = 1. / (2.*tex_height);
    float const t1 = ( 2.*(tex_height-1) + 1.) / (2.*tex_height);
    GLfloat texcoords[] = {
        s0, t0,
        s1, t0,
        s1, t1,
        s0, t1
    };

    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glEnable(GL_TEXTURE_2D);

    glVertexPointer(2, GL_FLOAT, 0, verts);
    glTexCoordPointer(2, GL_FLOAT, 0, texcoords);
    glColor4f(1., 1., 1., 1.);
    glDrawArrays(GL_QUADS, 0, 4);

    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);

    glBindTexture( GL_TEXTURE_2D, 0 );
}
Your intuition is basically correct. The pixels are stored as an array of bytes, but the bytes are arranged into consecutive groups, with each group representing a single pixel. To address a single pixel, you'll need to do a calculation like this:
unsigned char* image_data = start_of_pixel_data;
unsigned char* pixel_addr = image_data + bytes_per_pixel * (y * width_in_pixels + x);
Be careful about the width in pixels, as sometimes there is padding at the end of the row to bring the total row width in bytes up to a multiple of 4/8/16/32/64/etc. I recommend looking at the actual bytes of the bitmap in hex first to get a sense of what is going on. It's a great learning exercise and will give you high confidence in your pixel-walking code, which is what you want. You might be able to use a debugger to do this, or else write a simple loop with printf over the image bytes.
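As an illustration, here is a sketch of walking a 24-bit pixel array with the usual 4-byte row alignment. Names like image_data and width_in_pixels follow the snippet above, height_in_pixels is assumed to be defined the same way, and the B,G,R byte order is the BMP convention:
unsigned bytes_per_pixel = 3;                                        /* 24 bpp                 */
unsigned row_stride = (width_in_pixels * bytes_per_pixel + 3) & ~3u; /* rows padded to 4 bytes */

for (unsigned y = 0; y < height_in_pixels; ++y) {
    const unsigned char *row = image_data + y * row_stride;
    for (unsigned x = 0; x < width_in_pixels; ++x) {
        const unsigned char *pixel = row + x * bytes_per_pixel;
        unsigned char b = pixel[0], g = pixel[1], r = pixel[2];      /* BMP stores B,G,R        */
        /* ... use r, g, b ... */
    }
}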

OPENCV: I want to draw basic shapes in a loop removing older shapes each time a new shape is drawn

I have taken the code from shape.c given in the OpenCV library and modified it a little bit.
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
int main(int argc, char** argv)
{
    int i = 0;
    for(i=0; i<3; i++)
    {
        /* create an image */
        IplImage *img = cvCreateImage(cvSize(200, 100), IPL_DEPTH_8U, 3);
        /* draw a green line */
        cvLine(img,                        /* the dest image */
               cvPoint(10 + i*10, 10),     /* start point */
               cvPoint(150, 80),           /* end point */
               cvScalar(0, 255, 0, 0),     /* the color; green */
               1, 8, 0);                   /* thickness, line type, shift */
        /* draw a blue box */
        cvRectangle(img,                   /* the dest image */
                    cvPoint(20, 15+i*10),  /* top left point */
                    cvPoint(100, 70),      /* bottom right point */
                    cvScalar(255, 0, 0, 0),/* the color; blue */
                    1, 8, 0);              /* thickness, line type, shift */
        /* draw a red circle */
        cvCircle(img,                      /* the dest image */
                 cvPoint(110, 60), 35+i*10,/* center point and radius */
                 cvScalar(0, 0, 255, 0),   /* the color; red */
                 1, 8, 0);                 /* thickness, line type, shift */
        /* display the image */
        cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
        cvShowImage("img", img);
        cvWaitKey(0);
        cvDestroyWindow("img");
        cvReleaseImage(&img);
    }
    return 0;
}
I want that whenever i increments, the old figures should be removed and only the new figures should be drawn. But what I am getting is that the old figures are still present along with the new ones. Can you help me, please?
There is no direct way. Instead:
Initialize a background image.
Draw the foreground shape on a freshly cloned copy of the background.
Draw the next shape on another clone of the background.
Display the clones one by one, as sketched below.
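A minimal sketch of that idea using the C API (illustrative sizes and shapes, not the asker's exact drawing code):
IplImage *background = cvCreateImage(cvSize(200, 100), IPL_DEPTH_8U, 3);
cvSet(background, CV_RGB(0, 0, 0));               /* known, solid background */
cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
for (int i = 0; i < 3; i++)
{
    IplImage *frame = cvCloneImage(background);   /* fresh copy for every frame */
    cvLine(frame, cvPoint(10 + i*10, 10), cvPoint(150, 80),
           cvScalar(0, 255, 0, 0), 1, 8, 0);
    cvShowImage("img", frame);
    cvWaitKey(0);
    cvReleaseImage(&frame);                       /* old shapes vanish with the clone */
}
cvDestroyWindow("img");
cvReleaseImage(&background);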
When you create a (blank) image, the only way to be sure imageData is clean is to do it yourself by setting a solid color. After cvCreateImage(), call cvSet():
IplImage *img = cvCreateImage(cvSize(200, 100), IPL_DEPTH_8U, 3);
cvSet(img, CV_RGB(0, 0, 0));
You can improve the performance of your application if you take window creation/destruction out of the loop. There's no need to create a new window for every new image:
int i = 0;
cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
for(i=0;i<3 ;i++)
{
// code
}
cvDestroyWindow("img");
