Reuse texture SDL - c

I'm making my first SDL2 game, I have a texture where I draw my game but after each rendering the texture is blanked, I need to have my original texture unmodified.
I have easily made this with surface but it was too slow.
I draw random artifacts on this texture that disappear over time; I use SDL_RenderFillRect to shade the texture.
Anyone know how to do this ?
EDIT: Here's the code of the texture rendering
int gv_render(void) // This is called every 10ms
{
gv_lock;
// Safe default: fall back to the 10ms tick if no view matches below
// (the original left this uninitialized, which is UB on return).
int nexttimeout = 10;
// Clear the window (default target) to opaque black.
SDL_SetRenderTarget(renderer,NULL);
SDL_SetRenderDrawColor(renderer,0,0,0,255);
SDL_SetRenderDrawBlendMode(renderer,SDL_BLENDMODE_NONE);
SDL_RenderClear(renderer);
// Render view specific stuff into the persistent screen texture.
SDL_SetRenderTarget(renderer,gv_screen); // gv_screen is my screen texture
switch (player_view) { // I have multiple views
case pvsound:
nexttimeout = wave_render();
break;
default:
break;
}
// Final screen rendering: copy the texture to the backbuffer, then present.
// FIX: the original called SDL_RenderPresent while gv_screen was still the
// render target, which is undefined and can blank the texture; present must
// be the last call of the frame, with the default target selected.
SDL_SetRenderTarget(renderer,NULL);
SDL_RenderCopy(renderer,gv_screen,NULL,NULL);
SDL_RenderPresent(renderer);
gv_unlock;
return nexttimeout;
}
// Renders the sound-wave view.  The caller (gv_render) has already selected
// gv_screen as the render target.  Returns the next timeout in ms.
int wave_render(void) // I (will) have multiple view modes
{
game_wave *currwave = firstwave; // First wave is the first element of a linked list
game_wave *prevwave = firstwave; // trails currwave; used by the elided list-maintenance code
map_block* block;    // NOTE(review): block/oldblock only appear in the skipped non-graphics part
map_block* oldblock;
gv_lock;
// Load the old texture
// NOTE(review): gv_screen is the current render target here, so this copies
// the texture onto itself — confirm this is intended.
SDL_RenderCopy(renderer,gv_screen,NULL,NULL);
SDL_SetRenderDrawBlendMode(renderer,SDL_BLENDMODE_BLEND);
// Dark the screen: a low-alpha (8/255) black fill fades out older points.
SDL_SetRenderDrawColor(renderer,0,0,0,8);
SDL_RenderFillRect(renderer,NULL); // NULL rect = fill the whole target
SDL_SetRenderDrawBlendMode(renderer,SDL_BLENDMODE_NONE);
// Now I travel my list
while (currwave) {
// Apply block info
/* skipped non graphics */
// Draw the wave point
uint8_t light; // Wave have a strong that decrease with time
if (currwave->strong>=1.0)
light = 255; // Over 1 it don't decrease
else light = currwave->strong*255; // Now they aren't fully white
SDL_SetRenderDrawColor(renderer,light,light,light,255);
SDL_RenderDrawPoint(renderer, currwave->xpos,currwave->ypos);
// Switch to next wave
prevwave = currwave; // There also code is the skipped part
currwave = currwave->next;
};
// NOTE(review): SDL_RenderPresent is only defined for the default render
// target and should run once per frame; calling it here, while a texture is
// the target, is likely what blanks the texture between frames.
SDL_RenderPresent(renderer);
gv_unlock;
return 10;
};
```

This seems to be complicated; as David C. Rankin says, the SDL renderer is faster than surfaces but more or less write-only (SDL_RenderReadPixels and SDL_UpdateTexture could do the job in non-realtime cases).
I have changed my method, I use a linked list of pixels coordinates with entry points in a 256 items array.
My source code is now :
// One node of a singly linked list of pixel coordinates.  Points are
// bucketed by brightness level, so a node only stores its screen position.
struct game_wave_point {
    struct game_wave_point *next; // next point at the same brightness
    int x;
    int y;
};
typedef struct game_wave_point game_wave_point;

// graph_waves[b] heads the list of points drawn at grey level b (0..255).
// FIX: the original initializer "{NULL,NULL,...}" is not valid C; a partial
// initializer zero-fills the remaining elements, so "{ NULL }" suffices.
game_wave_point *graph_waves[256] = { NULL };
// Second version: waves are kept in 256 brightness buckets; each frame the
// buckets shift down one level ("darkening"), bucket 0 is freed, and live
// buckets are drawn as grey points.
// NOTE(review): the return type is missing (implicit int is invalid since C99).
wave_render(void)
{
game_wave *currwave = firstwave;
// Perform the darkening
int i;
uint8_t light;
// Shift every bucket down one brightness level.  Bucket 0 was emptied at the
// end of the previous frame, so nothing is leaked by the overwrite.
for (i=1;i<=255;i++)
graph_waves[i-1] = graph_waves[i];
graph_waves[255] = NULL;
// Remove unvisible point
// (points that just reached brightness 0 — they would never be drawn)
game_wave_point* newpoint;
while (graph_waves[0]) {
newpoint = graph_waves[0];
graph_waves[0] = newpoint->next;
free(newpoint);
};
// Wave heartbeat...
while (currwave) {
/* blablabla */
// Add the drawing point
// NOTE(review): 'light' must be set by the elided code above — confirm;
// malloc is also unchecked here.
newpoint = malloc(sizeof(game_wave_point));
newpoint->next = graph_waves[light];
newpoint->x = currwave->xpos*pixelsperblock;
newpoint->y = currwave->ypos*pixelsperblock;
// NOTE(review): bitwise '|' happens to work on these 0/1 operands, but
// logical '||' is what is meant.
if ((newpoint->x<0)|(newpoint->y<0))
free(newpoint);
else graph_waves[light] = newpoint;
/* blablabla */
};
// Now perform the drawing
for (i=1;i<=255;i++) {
newpoint = graph_waves[i];
SDL_SetRenderDrawColor(renderer,i,i,i,255);
// NOTE(review): this read-back only copies i into 'light', which is never
// used afterwards — it looks redundant.
SDL_GetRenderDrawColor(renderer,&light,NULL,NULL,NULL);
while (newpoint) {
SDL_RenderDrawPoint(renderer,newpoint->x,newpoint->y);
newpoint = newpoint->next;
};
};
return 10;
};
This works well on my computer (a progressive slowdown appears only in a case that I will never reach).
Next optimization maybe performed with Linux mremap(2) and similar, this will allow creating a simple array that work with SDL_RenderDrawPoints without slowness of realloc() on big array.

Related

Scaled Layers in GDI

Original question
Basically, I have two bitmaps, and I want to put one behind the other, scaled down to half its size.
Both are centered, and are of the same resolution.
The catch is that I want to put more than one bitmap on this back layer eventually, and want the scaling to apply to the whole layer and not just the individual bitmap.
My thought is I would use a memory DC for the back layer, capture its contents into a bitmap of its own and use StretchBlt to place it in my main dc
The code I have right now doesn't work, and I can't make sense of it, let alone find anyone who had done this before for direction.
My variables at the moment are as follows
hBitmap - back bitmap
hFiller - front bitmap
hdc - main DC
ldc - back DC(made with CreateCompatibleDC(hdc);)
resh - width of hdc
resv - height of hdc
note that my viewport origin is set to the center
--this part above is solved, with the one major issue being that it does not keep the back layers...
Revised Question
Here's my code. Everything works as intended except for the fact that the layers do not properly stack. They seem to erase what is underneath or fill it with black.
For the record this is a direct copy of my code. I explain sections of it but there is nothing missing between the code blocks.
case WM_TIMER:
{
switch(wParam)
{
case FRAME:
If any position or rotation values have changed, the following section of code clears the screen and prepares it to be rewritten
if(reload == TRUE){
tdc = CreateCompatibleDC(hdc);
oldFiller = SelectObject(tdc,hFiller);
GetObject(hFiller, sizeof(filler), &filler);
StretchBlt(hdc, 0-(resh/2), 0-(resv/2), resh, resv, tdc, 0, 0, 1, 1, SRCCOPY);
SelectObject(tdc,oldFiller);
DeleteDC(tdc);
if(turn == TRUE){
xForm.eM11 = (FLOAT) cos(r/angleratio);
xForm.eM12 = (FLOAT) sin(r/angleratio);
xForm.eM21 = (FLOAT) -sin(r/angleratio);
xForm.eM22 = (FLOAT) cos(r/angleratio);
xForm.eDx = (FLOAT) 0.0;
xForm.eDy = (FLOAT) 0.0;
SetWorldTransform(hdc, &xForm);
}
This is the part that only partially works. At a distance of 80 my scale value will make my bitmap 1 pixel by 1 pixel, so I consider this my "draw distance"
It scales properly, but the layers do not stack, as I mentioned above
for(int i=80;i>1;i--){
tdc = CreateCompatibleDC(hdc);
tbm = CreateCompatibleBitmap(hdc, resh, resv);
SelectObject(tdc, tbm);
BitBlt(tdc, 0-(resh/2), 0-(resv/2), resh, resv,hdc,0,0,SRCCOPY);
//drawing code goes in here
ldc = CreateCompatibleDC(hdc);
oldBitmap = SelectObject(ldc,hBitmap);
StretchBlt(tdc,(int)(angleratio*atan((double)128/(double)i)),0,(int)(angleratio*atan((double)128/(double)i)),(int)(angleratio*atan((double)128/(double)i)),ldc,0,0,128,128,SRCCOPY);
SelectObject(ldc,oldBitmap);
DeleteDC(ldc);
BitBlt(hdc, 0, 0, resh, resv, tdc, 0, 0, SRCCOPY);
DeleteObject(tbm);
DeleteDC(tdc);
}
reload = FALSE;
}
This section below just checks for keyboard input which changes the position or rotation of the "camera"
This part works fine and can be ignored
if(GetKeyboardState(NULL)==TRUE){
reload = TRUE;
if(GetKeyState(VK_UP)<0){
fb--;
}
if(GetKeyState(VK_DOWN)<0){
fb++;
}
if(GetKeyState(VK_RIGHT)<0){
lr--;
}
if(GetKeyState(VK_LEFT)<0){
lr++;
}
if(GetKeyState(0x57)<0){
p++;
}
if(GetKeyState(0x53)<0){
p--;
}
}
break;
}
}
break;

SDL2 CreateTextureFromSurface slow down

I'm building a graphical interface in SDL2, but if I create the renderer with the flags SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC I get a notable slowdown compared with the flag SDL_RENDERER_SOFTWARE, which I think shouldn't be possible.
I can't use SDL_RENDERER_SOFTWARE because I need to enable VSYNC to avoid tearing, and I need double buffering for that.
Actually i realize that the bottleneck is with the function SDL_CreateTextureFromSurface().
Like my code is pretty big i'll try to explain it instead of past everything here:
Initialize SDL and create a SDL_Surface named screen_surface with SDL_CreateRGBSurface with the same size than my window where ill blit any other surface.
I draw a big square in the middle of that surface with SDL_FillRect and draw a rack inside that square using two times SDL_FillRect for draw two squares, one 2 pixels more big than the next one and like that simulate a empty square (i know i can do the same with SDL_RenderDrawRect but i think is more optimal draw in a surface instead of the Render) for every cell of the rack until i have 4096 cells;
now using SDL_TTF i write info in each cell for that i use TTF_RenderUTF8_Blended for get a surface for each cell and i use SDL_BlitSurface for 'fusion' this surfaces with the screen_surface
And finally i want to go through the big square illuminating the cells that are being cheked for that i use SDL_FillRect for draw a little square that travel throught the rack.
Finally i use the SDL_CreateTextureFromSurface for transform screen_surface in screen_texture followed for SDL_RenderCopy and SDL_RenderPresent
This five steps are inside of the main while with the event management and following the recomendations in the SDL_API i do SDL_RenderClear each loop for redraw everything another time.
Said all this how i said at the begining i realise that the bottleneck is step 5 independent from the another steps because if i take the steps 2 and 3 and i do them before the while leaving inside the while only the creation of the rack illumination on a black window (cause im not drawing anything) i get the same slowdown. Only if i manage to draw things without use textures the velocity increase notably.
There are my questions:
Why could this be happening? Theoretically, shouldn't double buffering be faster than the software renderer?
There is any form to simulate vsync in Software Renderer?
Can i Render a Surface without build a Texture?
PD: I have read a bunch of post around the internet and im gonna answer some typical questions: i reutilize the screen_surface, i can't reutilize the surface that TTF returns, im creating and destroying the texture each loop (cause i think i can not reutilize it).
I let here my code
int main(int ac, char **av)
{
	t_data data;

	(void)ac;
	(void)av;
	init_data(&data); /* initialize SDL — the original was missing this ';' */
	ft_ini_font(data); /* Initialize TTF */
	/* FIX: ft_ini_interface takes a t_data*, so pass the address. */
	ft_ini_interface(&data);
	main_loop(&data);
	ft_quit_graphics(data); /* Close SDL and TTF */
	/* NOTE(review): the original called free(data), but 'data' has automatic
	 * storage — freeing it is invalid.  If t_data is meant to live on the
	 * heap, declare it as a pointer and allocate it in init_data instead. */
	return (0);
}
void main_loop(t_data *data)
{
	/* Pump events and redraw the interface until something clears
	 * data->running (e.g. a quit event handled in events()). */
	while (data->running)
	{
		SDL_Rect rack_dst;

		events(data);
		/* Blit the pre-rendered rack into the screen surface. */
		rack_dst = (SDL_Rect){data->rack_x, data->rack_y, data->rack_w, data->rack_h};
		SDL_BlitSurface(data->rack_surface, NULL, data->screen_surface, &rack_dst);
		ft_write_info(data);     /* text in each cell */
		ft_illum_celd(data);     /* highlight the cell being read */
		set_back_to_front(data); /* surface -> texture -> present */
	}
}
// Creates the full-window software surface and the pre-rendered rack surface.
void ft_ini_interface(t_data *data)
{
// NOTE(review): missing ';' at the end of this statement.
data->screen_surface = SDL_CreateRGBSurface(0, data->w, data->h, 32, RED_MASK, GREEN_MASK, BLUE_MASK, ALPHA_MASK)
...
/* stuff for calculate rack dims */
...
// NOTE(review): generate_rack_surface below is declared void, so this
// assignment is inconsistent — confirm which one is the real signature.
data->rack_surface = generate_rack_surface(data);
}
// Builds the rack background: a grid of "empty" cells simulated by drawing a
// dark rect 2px larger than each light cell rect.
// NOTE(review): declared void, but ft_ini_interface assigns its result;
// 'Graph->rack' vs 'data->rack' naming is also inconsistent — confirm.
void generate_rack_surface(t_data *data)
{
int i;
int j;
int k;
data->rack_surface = SDL_CreateRGBSurface(0, data->rack_w, data->rack_h, 32, RED_MASK, GREEN_MASK, BLUE_MASK, ALPHA_MASK);
SDL_FillRect(Graph->rack, NULL, 0x3D3D33FF);
...
/* initialize i, j, k to draw the rack properly */
...
while (all cells not drawn)
{
// Wrap to the next row once the current one is full.
if (k && !i)
{
data->celd_y += Graph->data->celd_h - 1;
data->celd_x = 0;
k--;
}
// Outer (border) rect, 1px larger on each side...
SDL_FillRect(data->rack, &(SDL_Rect){data->celd_x - 1, data->celd_y - 1, data->celd_w + 2, data->celd_h + 2}, 0x1C1C15FF))
// ...then the inner cell rect on top of it.
SDL_FillRect(data->rack, &(SDL_Rect){data->celd_x, data->celd_y, data->celd_w, data->celd_h}, 0x3D3D33FF)
data->celd_x += data->celd_w - 1;
i--;
}
}
// Renders one text surface per cell with SDL_ttf and blits it onto the
// screen surface.  NOTE(review): 'surf_byte' is not declared in the visible
// code, and the function's closing brace is missing from this excerpt.
void ft_write_info(t_data *data)
{
SDL_Color color;
char *info;
while (all info not written)
{
color = take_color(); /*take the color of the info (only 4 ifs) */
info = take_info(data); /*take info from a source using malloc*/
surf_byte = TTF_RenderUTF8_Blended(data->font, info, color);
...
/* stuff to compute the correct position in the rack */
...
SDL_BlitSurface(surf_byte, NULL, Graph->screen.screen, &(SDL_Rect){data->info_x, data->info_y, data->celd.w, data->celd.h});
SDL_FreeSurface(surf_byte); // TTF_Render* allocates a fresh surface each call
free(info);
}
/* Highlights the cell currently being read by filling its rect on the
 * screen surface. */
void ft_illum_celd(t_data *data)
{
	int color;
	SDL_Rect illum;

	/* TODO(review): the original left 'color' uninitialized (UB when read
	 * by SDL_FillRect); pick the real highlight color here. */
	color = 0;
	illum = next_illum(data); /* rect of the info being read (original lacked the ';') */
	/* FIX: the original passed '&pc', an undeclared identifier — the rect
	 * computed above is 'illum'. */
	SDL_FillRect(data->screen_surface, &illum, color);
}
/* Uploads the software-rendered frame to the GPU and flips it to the window.
 * The texture is rebuilt from the surface every frame (this is the upload
 * cost the question is about). */
void set_back_to_front(t_data *data)
{
	SDL_Texture *frame;

	frame = SDL_CreateTextureFromSurface(data->Renderer, data->screen_surface);
	SDL_RenderCopy(data->Renderer, frame, NULL, NULL);
	SDL_DestroyTexture(frame);
	SDL_RenderPresent(data->Renderer);
	/* Clear the backbuffer so the next frame starts from a known state. */
	SDL_RenderClear(data->Renderer);
}

why is GlutPostRedisplay and sleep function is not working in this code?

i have tried to implement the data transfer between usb and cpu in this project. The data transfer is being shown as a small rectangle moving from one component of the computer to another.
In the code below, the GlutPostRedisplay does not work.
Also, can someone tell me if sleep() is used correctly here, because the functions called in display do not work in sync: casing() is never executed, and after firstscreen() it jumps directly to opened(), while operate() does not work.
what is the error with this code ??
// Animates data transfers between components (USB/DVD/HD <-> bridges <-> CPU)
// by sweeping a small quad along hard-coded screen coordinates.
// NOTE(review): sleep() blocks the GLUT event loop for the whole second, so
// no events are processed and nothing is repainted while it runs — this is
// the cause of the "GlutPostRedisplay does not work" symptom.
void operate()
{
URLTEXTX = 200;
URLTEXTY = 950;
displayString(READUSB,1);
//southbrigde to northbrigde
bottom(488.0,425.0,380.0);
back(488.0,188.0,380.0);
top(188.0,380.0,550.0);
//northbridge to cpu
front(230.0,350.0,595.0);
top(345.0,600.0,650.0);
//read from usb
back(700.0,625.0,465.0);
bottom(625.0,460.0,385.0);
back(620.0,525.0,390.0);
sleep(1);
URLTEXTX = 200;
URLTEXTY = 950;
displayString(WRITEUSB,1);
//cpu to northbridge
bottom(350.0,650.0,595.0);
back(350.0,230.0,600.0);
//northbridge to southbridge
bottom(188.0,550.0,380.0);
front(188.0,488.0,380.0);
top(483.0,380.0,425.0);
//write to usb
front(525.0,625.0,385.0);
top(625.0,385.0,460.0);
front(620.0,700.0,460.0);
sleep(1);
URLTEXTX = 200;
URLTEXTY = 950;
displayString(READDVD,1);
//read from dvd
back(600.0,560.0,810.0);
bottom(570.0,810.0,600.0);
back(560.0,525.0,610.0);
//ram to northbridge
back(450.0,230.0,580.0);
//northbridge to cpu
front(230.0,350.0,595.0);
top(345.0,600.0,650.0);
sleep(1);
URLTEXTX = 200;
URLTEXTY = 950;
displayString(WRITEDVD,1);
//cpu to northbridge
bottom(350.0,650.0,595.0);
back(350.0,230.0,600.0);
//northbridge to ram
front(230.0,450.0,580.0);
//write to dvd
front(525.0,570.0,600.0);
top(570.0,600.0,800.0);
front(560.0,600.0,800.0);
sleep(1);
URLTEXTX = 200;
URLTEXTY = 950;
displayString(READHD,1);
//read from hard disc
back(640.0,560.0,300.0);
top(560.0,300.0,530.0);
back(560.0,525.0,530.0);
//ram to northbridge
back(450.0,230.0,580.0);
//northbridge to cpu
front(230.0,350.0,595.0);
top(345.0,600.0,650.0);
sleep(1);
URLTEXTX = 200;
URLTEXTY = 950;
displayString(WRITEHD,1);
//cpu to northbridge
bottom(350.0,650.0,595.0);
back(350.0,230.0,600.0);
//northbridge to ram
front(230.0,450.0,580.0);
//write to hard disc
front(525.0,560.0,530.0);
bottom(560.0,530.0,300.0);
front(560.0,640.0,300.0);
sleep(1);
}
void front(GLfloat x1,GLfloat x2,GLfloat y1)//to move in forward direction
{
	/* Sweeps a 5-unit "bit" square rightwards from x1 to x2 at height y1.
	 * FIX: the original passed 'x1+5' as the trailing edge, so the quad
	 * stretched from the start point instead of following the loop
	 * variable — back(), top() and bottom() all use the loop variable for
	 * both edges, and front() should too. */
	GLfloat i;
	for (i = x1; i <= x2; i++)
	{
		drawbit(i, i + 5, y1, y1 - 5);
		glutPostRedisplay(); /* only flags a redisplay; drawbit does the drawing */
	}
}
void back(GLfloat x1,GLfloat x2,GLfloat y1)//to move in backward direction
{
	/* March a 5-unit bit square leftwards, one unit per step, from x1
	 * down to x2 along the row at y1. */
	GLfloat pos = x1;

	while (pos >= x2)
	{
		drawbit(pos, pos - 5, y1, y1 - 5);
		glutPostRedisplay(); /* only flags a redisplay; drawbit does the drawing */
		pos--;
	}
}
void top(GLfloat x1,GLfloat y1,GLfloat y2)//to move in upward direction
{
	/* March a 5-unit bit square upwards, one unit per step, from y1 up
	 * to y2 along the column at x1. */
	GLfloat pos = y1;

	while (pos <= y2)
	{
		drawbit(x1, x1 + 5, pos, pos + 5);
		glutPostRedisplay(); /* only flags a redisplay; drawbit does the drawing */
		pos++;
	}
}
void bottom(GLfloat x1,GLfloat y1,GLfloat y2)//to move in downward direction
{
	/* March a 5-unit bit square downwards, one unit per step, from y1
	 * down to y2 along the column at x1. */
	GLfloat pos = y1;

	while (pos >= y2)
	{
		drawbit(x1, x1 - 5, pos, pos - 5);
		glutPostRedisplay(); /* only flags a redisplay; drawbit does the drawing */
		pos--;
	}
}
void drawbit(GLfloat x1,GLfloat x2,GLfloat y1,GLfloat y2)
{
	/* Immediate-mode white quad.  Corners are emitted in the same order
	 * as the original: (x1,y1) -> (x2,y1) -> (x2,y2) -> (x1,y2). */
	const GLfloat xs[4] = { x1, x2, x2, x1 };
	const GLfloat ys[4] = { y1, y1, y2, y2 };
	int corner;

	glBegin(GL_POLYGON);
	glColor3f(1.0, 1.0, 1.0);
	for (corner = 0; corner < 4; corner++)
		glVertex2f(xs[corner], ys[corner]);
	glEnd();
	glFlush(); /* push buffered GL commands out immediately */
}
// GLUT display callback.
// NOTE(review): sleep() inside the display callback freezes the event loop,
// and each glClear wipes what the previous stage drew — so only the last
// stage's output survives, matching the reported symptoms.  The state-machine
// approach sketched in the answer below is the fix.
void display()
{
glClear(GL_COLOR_BUFFER_BIT);
firstscreen(); //introduction to the project
sleep(3);
glClear(GL_COLOR_BUFFER_BIT);
casing(); //cpu case
sleep(2);
glClear(GL_COLOR_BUFFER_BIT);
opened(); //when cpu case is opened shows internal components
sleep(1);
operate(); //data transfer between various components
}
The problem is similar to this: Pausing in OpenGL successively
glutPostRedisplay simply sets a flag in glut to call your display callback on the next loop. It doesn't actually draw anything.
The function I suspect you're after is glutSwapBuffers. Without double buffering, geometry is drawn directly to the screen (although "draw" commands to the GPU are buffered for which you'd want glFlush). This commonly causes flickering because you see things that later get covered by closer geometry (because of the depth buffer). Double buffering solves this by rendering to an off-screen buffer and then displaying the result all at once. Make sure GLUT_DOUBLE is passed to glutInit so that you have a back buffer.
While you're sleep()ing, your application won't be able to capture and process events. Lets say you want to close the window. Until sleep returns the whole thing will be unresponsive. A sleep can still be important so you don't hog your CPU. I'd separate these concepts.
Loop/poll with an idle function until your delay time has elapsed. Then call glutPostRedisplay. Add glutSwapBuffers to display if you're double buffering.
Write a framerate limiter that calls sleep so you don't hog cycles.
A simple method to draw different things after set delays is to write a small state machine...
// Sketch of a tiny state machine: 'state' selects what display() draws,
// 'timer' counts down the seconds left in the current state.
int state = STATE_INIT;
float timer = 0.0f;
// GLUT idle callback: advance the state machine instead of sleep()ing,
// so the event loop stays responsive.  The '...' lines are placeholders.
void idle()
{
//insert framerate limiter here
//calculate time since last frame, perhaps using glutGet(GLUT_ELAPSED_TIME)
float deltaTime = ... /* placeholder — compute elapsed seconds here */
timer -= deltaTime;
if (timer < 0.0f)
{
// Time's up: move to the next state and reset the countdown.
switch (state)
{
case STATE_INIT:
state = STATE_DRAW_FIRST_THING;
timer = 123.0f;
...
}
glutPostRedisplay(); // ask GLUT to run display() on the next loop
}
}
// Sketch of the matching display callback: draw whatever the current state
// dictates, then flip the back buffer (requires GLUT_DOUBLE at init).
void display()
{
...
if (state == STATE_DRAW_FIRST_THING)
{
...
}
...
glutSwapBuffers(); // double-buffered flip replaces glFlush-to-front drawing
}
As your app becomes bigger this I doubt this will be maintainable and you'll want something more robust, but until then this is a good start.
Simply changing a void (*currentView)(void); callback function in idle would save some hard coding in display. You might want to create an object orientated state machine. Beyond boolean states you might want to look into animation and keyframe interpolation. Rather than hard code everything, storing geometry, keyframes and state sequences in a file is a nice way to separate code and data. XML is very nice to work with for this provided you use a library.

Issues with adding an array of sprites to the screen in cocos2d?

for my game in cocos2d, I'm trying to add an array of sprites to the screen that constantly move to the left. When I add the method to my update method, it doesn't work as expected. I first set up a batch node.
// Creates the sprite batch node so all berry sprites drawn from the same
// atlas share a single draw call, and registers the atlas's sprite frames.
// FIX: the string literals were pasted as #"..." — Objective-C string
// literals use the @ prefix; #"..." does not compile.
- (void)setupBatchNode
{
    _batchNode = [CCSpriteBatchNode batchNodeWithFile:@"berries-hd.pvr.ccz"];
    [self addChild:_batchNode z:-1];
    [[CCSpriteFrameCache sharedSpriteFrameCache]
        addSpriteFramesWithFile:@"berries-hd.plist"];
}
// Spawns a berry sprite at a random interval, just off the right edge at a
// random height, and moves it across the screen before removing it.
// FIX: #"testt.png" and #selector(...) were pasted with '#' instead of the
// Objective-C '@' prefix and would not compile.
-(void)updatePinkBerries:(ccTime)dt
{
    CGSize winSize = [CCDirector sharedDirector].winSize; // Is it time to spawn a pink berry?
    double curTime = CACurrentMediaTime();
    if (curTime > _nextPinkBerrySpawn)
    {
        // Figure out the next time to spawn a berry
        float randSecs = randomValueBetween(0.20, 1.0);
        _nextPinkBerrySpawn = randSecs + curTime;
        // Figure out a random Y value to spawn at
        float randY = randomValueBetween(0.0, winSize.height);
        // Figure out a random amount of time to move from right to left
        float randDuration = randomValueBetween(2.0, 10.0);
        // Create a new berry sprite (frame comes from the batch node's atlas)
        CCSprite *pinkBerry = [CCSprite spriteWithSpriteFrameName:@"testt.png"];
        [_batchNode addChild:pinkBerry];
        // Set its position to be offscreen to the right
        pinkBerry.position = ccp(winSize.width + pinkBerry.contentSize.width/2, randY);
        // Move it offscreen to the left, and when it's done, call removeNode
        [pinkBerry runAction:
            [CCSequence actions:
                [CCMoveBy actionWithDuration:randDuration position:ccp(-winSize.width- pinkBerry.contentSize.width, 0)],
                [CCCallFuncN actionWithTarget:self selector:@selector(removeNode:)], nil]];
    }
}

How would I map a camera image to create a live funhouse mirror using opencv?

Using Opencv and Linux I would like to create a fun-house mirror effect, short and squat, tall and thin effect using a live webcamera. My daughter loves those things and I would like to create one using a camera. I am not quite sure about the transforms necessary for these effects. Any help would be appreciated. I have much of the framework running, live video playing and such, just not the transforms.
thanx
I think that you need to use a 'radial' transform and a 'pincushion' transform, which is the inverse radial.
In order to break the symmetry of the transforms you can stretch the image before and after:
Suppose your image is 300x300
pixels.
Stretch it to 300x600 or
600x300 using cvResize()
Apply transform: radial, pincushion or
sinusoidal
Stretch back to 300x300
I never used radial or sinusoidal transforms in openCV so I dont have a piece of code to attach. But you can use cvUndistort2() and see if it is OK.
Create window with trackbars with range 0..100. Each trackbar controls parameter of distortion:
// Globals for the interactive distortion demo (old OpenCV C API).
// NOTE(review): the statements after the declarations (cvNamedWindow etc.)
// must live inside a function such as main(); shown here as a fragment.
static IplImage* srcImage;  // input frame; must be set before callBackFun runs
static IplImage* dstImage;  // output frame; must be allocated to match srcImage
static double _camera[9];
static double _dist4Coeff[4]; // lens distortion coefficients [k1, k2, p1, p2]
static int _r = 50; // Radial distortion slider. 50 = neutral in range 0..100
static int _tX = 50; // Tangential coefficient in the X direction
static int _tY = 50; // Tangential coefficient in the Y direction
static int allRange = 50; // half-range; sliders map to roughly -1..+1
// Open windows
cvNamedWindow(winName, 1);
// Add track bars; each slider change re-runs callBackFun.
cvShowImage(winName, srcImage );
cvCreateTrackbar("Radial", winName, &_r , 2*allRange, callBackFun);
cvCreateTrackbar("Tang X", winName, &_tX , 2*allRange, callBackFun);
cvCreateTrackbar("Tang Y", winName, &_tY , 2*allRange, callBackFun);
callBackFun(0); // render once with the initial (neutral) slider values
// The distortion call back
// Trackbar callback: rebuilds the distortion/intrinsics matrices from the
// slider positions and re-renders the distorted image with cvUndistort2.
void callBackFun(int arg){
// Wrap the static arrays as OpenCV matrix headers (no copy).
CvMat intrCamParamsMat = cvMat( 3, 3, CV_64F, _camera );
CvMat dist4Coeff = cvMat( 1, 4, CV_64F, _dist4Coeff );
// Build distortion coefficients matrix.
// Each slider value 0..2*allRange maps linearly to roughly -1..+1.
dist4Coeff.data.db[0] = (_r-allRange*1.0)/allRange*1.0;
dist4Coeff.data.db[1] = (_r-allRange*1.0)/allRange*1.0;
dist4Coeff.data.db[2] = (_tY-allRange*1.0)/allRange*1.0;
dist4Coeff.data.db[3] = (_tX-allRange*1.0)/allRange*1.0;
// Build intrinsic camera parameters matrix (row-major 3x3:
// fx 0 cx / 0 fy cy / 0 0 1).  The /2.0 halves a calibration done at
// 640x480 for use at 320x240 — TODO confirm against the actual input size.
intrCamParamsMat.data.db[0] = 587.1769751432448200/2.0;
intrCamParamsMat.data.db[1] = 0.;
intrCamParamsMat.data.db[2] = 319.5000000000000000/2.0+0;
intrCamParamsMat.data.db[3] = 0.;
intrCamParamsMat.data.db[4] = 591.3189722549362800/2.0;
intrCamParamsMat.data.db[5] = 239.5000000000000000/2.0+0;
intrCamParamsMat.data.db[6] = 0.;
intrCamParamsMat.data.db[7] = 0.;
intrCamParamsMat.data.db[8] = 1.;
// Apply transformation.  NOTE(review): dstImage must already be allocated
// with the same size/format as srcImage — cvUndistort2 does not allocate.
cvUndistort2( srcImage, dstImage, &intrCamParamsMat, &dist4Coeff );
cvShowImage( winName, dstImage );
}

Resources