Memory leak in PictureBox Image - WinForms

I have made a program in which I am changing the image of a pictureBox again and again, using this code:
while(1)
{
    this->pictureBox1->Refresh();
    System::Drawing::Bitmap ^bmp;
    bmp = drawImageMeter(data.Max);
    this->pictureBox1->Image = pictureBox1->Image->FromHbitmap(bmp->GetHbitmap());
    delete bmp;
}
drawImageMeter(data.Max) is a function which creates a bitmap and returns it.
My problem is that a memory leak is being reported on this line:
this->pictureBox1->Image = pictureBox1->Image->FromHbitmap(bmp->GetHbitmap());
Intel VTune is showing the leak on the same line:
P810 Memory leak form2.h Form1.exe 808 New
P34 Kernel resource leak form2.h Form1.exe Not fixed
Edited Code...
delete pictureBox1->Image;
pictureBox1->Image = nullptr;
this->pictureBox1->Refresh();
bmp = drawImageMeter(data.Max);
System::IntPtr hbitmap = bmp->GetHbitmap();
this->pictureBox1->Image = pictureBox1->Image->FromHbitmap(hbitmap); // Memory LEAK & Kernel Resource Leak
delete bmp;
DeleteObject((HGDIOBJ)hbitmap);
After that I am no longer getting the GDI resource leak, but I am still getting a memory leak on this line:
this->pictureBox1->Image = pictureBox1->Image->FromHbitmap(hbitmap); // Memory LEAK
drawImageMeter() Definition
System::Drawing::Bitmap^ drawImageMeter(float intensity_value)
{
    IplImage *Background = cvLoadImage("Dialer.bmp", 1); // Memory Leak
    int width, height;
    width = Background->width;
    height = Background->height;
    if (counter == 1)
    {
        counter++;
        needle_center.x = width / 2;
        needle_center.y = height / 2;
        needle_top.x = needle_center.x;
        needle_top.y = needle_center.y - 140;
    }
    double const PI = 3.14159265358979323;
    int x1 = needle_top.x;
    int y1 = needle_top.y;
    int x0 = needle_center.x;
    int y0 = needle_center.y;
    float angle;
    CurrIntensity = intensity_value;
    angle = CurrIntensity - PreIntensity;
    angle = 0.0703125f * angle; // degrees, not radians
    float radians = angle * (PI / 180.0f); // convert degrees to radians
    if (current_max == 1)
    {
        current_max++;
        int N1x1 = needle_top.x;
        int N1y1 = needle_top.y;
        needle1_top.x = ((N1x1 - x0) * cos(radians)) - ((N1y1 - y0) * sin(radians)) + x0;
        needle1_top.y = ((N1x1 - x0) * sin(radians)) + ((N1y1 - y0) * cos(radians)) + y0;
    }
    needle_top.x = ((x1 - x0) * cos(radians)) - ((y1 - y0) * sin(radians)) + x0;
    needle_top.y = ((x1 - x0) * sin(radians)) + ((y1 - y0) * cos(radians)) + y0;
    cvLine(Background, needle_center, needle1_top, CV_RGB(0, 0, 255), 1, 4, 0);
    cvLine(Background, needle_center, needle_top, CV_RGB(255, 0, 0), 1, 4, 0);
    System::Drawing::Bitmap ^bmp = gcnew System::Drawing::Bitmap(
        Background->width, Background->height, Background->widthStep,
        System::Drawing::Imaging::PixelFormat::Format24bppRgb,
        (System::IntPtr)Background->imageData);
    PreIntensity = CurrIntensity;
    return bmp;
}
In this function I am getting a memory leak on the line where I have written // Memory Leak.
I cannot release this IplImage *Background here in this function: if I release the image here, I will no longer see the image in the pictureBox, and when I do it anyway the application closes.
Can anybody please help me sort out this problem?
Thanks.
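The Bitmap created at the end of drawImageMeter() shares the IplImage's pixel buffer (that Bitmap constructor taking an IntPtr does not copy the data), which is why releasing Background blanks the pictureBox. A sketch of one way out, untested against this exact project: deep-copy the pixels into a managed Bitmap, then release the IplImage immediately. Caching "Dialer.bmp" once instead of calling cvLoadImage() on every call would also avoid reloading it each frame.
// Sketch: make the Bitmap own a copy of the pixels so the IplImage can go.
System::Drawing::Bitmap ^shared = gcnew System::Drawing::Bitmap(
    Background->width, Background->height, Background->widthStep,
    System::Drawing::Imaging::PixelFormat::Format24bppRgb,
    (System::IntPtr)Background->imageData);
System::Drawing::Bitmap ^bmp = gcnew System::Drawing::Bitmap(shared); // deep copy
delete shared;               // drop the wrapper around OpenCV's buffer
cvReleaseImage(&Background); // safe now: bmp owns its own pixels
PreIntensity = CurrIntensity;
return bmp;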

Related

SOLVED: Faster HLSL code? Wondering about lower CPU overhead when rendering quads in 3-space

!!!UPDATE!!! Using the vertex shader to generate quads via DrawInstanced() calls definitely reduced CPU overhead and increased quads drawn per second. But there was much more performance to be found by using a combination of instanced drawing via a vertex shader that generates a point list, and a geometry shader that generates quads based on those points.
Thanks to @Soonts for not only recommending a faster way, but also for reminding me of conditional moves and unrolling loops.
Here is the geometry shader I created for sprites with 2D rotation:
cbuffer CB_PROJ {
    matrix camera;
};
/* Reduced packet size -- 256x256 max atlas segments
   -------------------
   FLOAT3 Sprite location   // 12 bytes
   FLOAT  Rotation          // 16 bytes
   FLOAT2 Scale             // 24 bytes
   UINT                     // 28 bytes
      Fixed8p00 Texture X segment
      Fixed8p00 Texture X total segments
      Fixed8p00 Texture Y segment
      Fixed8p00 Texture Y total segments
   Following vertex data is only processed by the vertex shader:
   UINT                     // 32 bytes
      Fixed3p00 Squadron generation method
      Fixed7p00 Sprite stride
      Fixed8p14 X/Y distance between sprites
*/
struct VOut {
    float3 position : POSITION;
    float3 r_s : NORMAL;
    uint bits : BLENDINDICES;
};

struct GOut {
    float4 pos : SV_Position;
    float3 position : POSITION;
    float3 n : NORMAL;
    float2 tex : TEXCOORD;
    uint pID : SV_PrimitiveID;
};

[maxvertexcount(4)]
void main(point VOut gin[1], uint pID : SV_PrimitiveID, inout TriangleStream<GOut> triStream) {
    GOut output;
    const uint bits = gin[0].bits;
    const uint ySegs = (bits & 0xFF000000) >> 24u;
    const uint _yOS = (bits & 0x00FF0000) >> 16u;
    const float yOS = 1.0f - float(_yOS) / float(ySegs);
    const float yOSd = rcp(float(ySegs));
    const uint xSegs = (bits & 0x0000FF00) >> 8u;
    const uint _xOS = (bits & 0x000000FF);
    const float xOS = float(_xOS) / float(xSegs);
    const float xOSd = rcp(float(xSegs));
    float2 v;
    output.pID = pID;
    output.n = float3(0.0f, 0.0f, -1.0f);

    output.position = gin[0].position;         // Translate
    v.x = -gin[0].r_s.y; v.y = -gin[0].r_s.z;  // Scale
    output.tex = float2(xOS, yOS);
    output.position.x += v.x * cos(gin[0].r_s.x) - v.y * sin(gin[0].r_s.x); // Rotate
    output.position.y += v.x * sin(gin[0].r_s.x) + v.y * cos(gin[0].r_s.x);
    output.pos = mul(float4(output.position, 1.0f), camera); // Transform
    triStream.Append(output);

    output.position = gin[0].position;
    v.x = -gin[0].r_s.y; v.y = gin[0].r_s.z;
    output.tex = float2(xOS, yOS - yOSd);
    output.position.x += v.x * cos(gin[0].r_s.x) - v.y * sin(gin[0].r_s.x);
    output.position.y += v.x * sin(gin[0].r_s.x) + v.y * cos(gin[0].r_s.x);
    output.pos = mul(float4(output.position, 1.0f), camera);
    triStream.Append(output);

    output.position = gin[0].position;
    v.x = gin[0].r_s.y; v.y = -gin[0].r_s.z;
    output.tex = float2(xOS + xOSd, yOS);
    output.position.x += v.x * cos(gin[0].r_s.x) - v.y * sin(gin[0].r_s.x);
    output.position.y += v.x * sin(gin[0].r_s.x) + v.y * cos(gin[0].r_s.x);
    output.pos = mul(float4(output.position, 1.0f), camera);
    triStream.Append(output);

    output.position = gin[0].position;
    v.x = gin[0].r_s.y; v.y = gin[0].r_s.z;
    output.tex = float2(xOS + xOSd, yOS - yOSd);
    output.position.x += v.x * cos(gin[0].r_s.x) - v.y * sin(gin[0].r_s.x);
    output.position.y += v.x * sin(gin[0].r_s.x) + v.y * cos(gin[0].r_s.x);
    output.pos = mul(float4(output.position, 1.0f), camera);
    triStream.Append(output);
}
!!!ORIGINAL TEXT!!!
Last time I was coding, I had barely started learning Direct3D9c. Currently I'm hitting about 30K single-texture quads lit with 15 lights at about 450fps. I haven't learned instancing or geometry shading at all yet, and I'm trying to prioritise the order I learn things in for my needs, so I've only taken glances at them.
My first thought was to reduce the amount of vertex data being shunted to the GPU, so I changed the vertex structure to a FLOAT2 (for texture coords) and a UINT (for indexing), relying on 4x float3 constants in the vertex shader to define the corners of the quads.
I figured I could reduce the size of the vertex data further, and reduced each vertex unit to a single UINT containing a 2-bit index (to reference the real vertexes of the quad) and 2x 15-bit fixed-point numbers (yes, I'm showing my age, but fixed-point still has its value) representing offsets into atlas textures.
So far, so good, but I know bugger all about Direct3D11 and HLSL, so I've been wondering if there's a faster way.
Here's the current state of my vertex shader:
cbuffer CB_PROJ
{
    matrix model;
    matrix modelViewProj;
};

struct VOut
{
    float3 position : POSITION;
    float3 n : NORMAL;
    float2 texcoord : TEXCOORD;
    float4 pos : SV_Position;
};

static const float3 position[4] = { -0.5f, 0.0f, -0.5f, -0.5f, 0.0f, 0.5f, 0.5f, 0.0f, -0.5f, 0.5f, 0.0f, 0.5f };

// Index bitpattern: YYYYYYYYYYYYYYYXXXXXXXXXXXXXXXVV
//
// 00-01 . uint2b    == Vertex index (0-3)
// 02-16 . fixed1p14 == X offset into atlas texture(s)
// 17-31 . fixed1p14 == Y offset into atlas texture(s)
//
VOut main(uint bitField : BLENDINDICES) {
    VOut output;
    const uint i = bitField & 0x03u;
    const uint xStep = (bitField >> 2) & 0x7FFFu;
    const uint yStep = (bitField >> 17);
    const float xDelta = float(xStep) * 0.00006103515625f; // 1/16384 == 2^-14
    const float yDelta = float(yStep) * 0.00006103515625f;
    const float2 texCoord = float2(xDelta, yDelta);
    output.position = (float3) mul(float4(position[i], 1.0f), model);
    output.n = mul(float3(0.0f, 1.0f, 0.0f), (float3x3) model);
    output.texcoord = texCoord;
    output.pos = mul(float4(output.position, 1.0f), modelViewProj);
    return output;
}
My pixel shader for completeness:
Texture2D Texture : register(t0);
SamplerState Sampler : register(s0);

struct LIGHT {
    float4 lightPos; // .w == range
    float4 lightCol; // .a == flags
};

cbuffer cbLight {
    LIGHT l[16] : register(b0); // 256 bytes
}

static const float3 ambient = { 0.15f, 0.15f, 0.15f };

float4 main(float3 position : POSITION, float3 n : NORMAL, float2 TexCoord : TEXCOORD) : SV_Target
{
    const float4 Texel = Texture.Sample(Sampler, TexCoord);
    if (Texel.a < 0.707106f) discard; // My source images have their alpha values inverted.
    float3 result = { 0.0f, 0.0f, 0.0f };
    for (uint xx = 0; xx < 16 && l[xx].lightCol.a != 0xFFFFFFFF; xx++)
    {
        const float3 lCol = l[xx].lightCol.rgb;
        const float range = l[xx].lightPos.w;
        const float3 vToL = l[xx].lightPos.xyz - position;
        const float distToL = length(vToL);
        if (distToL < range * 2.0f)
        {
            const float att = min(1.0f, (distToL / range + distToL / (range * range)) * 0.5f);
            const float3 lum = Texel.rgb * saturate(dot(vToL / distToL, n)) * lCol;
            result += lum * (1.0f - att);
        }
    }
    return float4(ambient * Texel.rgb + result, Texel.a);
}
And the rather busy-looking C++ function to generate the vertex data (all non-relevant functions removed):
al16 struct CLASS_PRIMITIVES {
    ID3D11Buffer* pVB = NULL, * pIB = NULL;
    const UINT strideV1 = sizeof(VERTEX1);

    void CreateQuadSet1(ui32 xSegs, ui32 ySegs) {
        al16 VERTEX1* vBuf;
        al16 D3D11_BUFFER_DESC bd = {};
        D3D11_SUBRESOURCE_DATA srd = {};
        ui32 index = 0, totalVerts = xSegs * ySegs * 4;
        if (pVB) return;
        vBuf = (VERTEX1*)_aligned_malloc(strideV1 * totalVerts, 16);
        for (ui32 yy = ySegs; yy; yy--)
            for (ui32 xx = 0; xx < xSegs; xx++) {
                double dyStep2 = 16384.0 / double(ySegs);
                double dyStep1 = dyStep2 * double(yy);
                dyStep2 *= double(yy - 1);
                ui32 yStep1 = dyStep1;
                yStep1 <<= 17;
                ui32 yStep2 = dyStep2;
                yStep2 <<= 17;
                vBuf[index].b = 0 + (ui32(double(16384.0 / double(xSegs) * double(xx))) << 2) + yStep1;
                index++;
                vBuf[index].b = 1 + (ui32(double(16384.0 / double(xSegs) * double(xx))) << 2) + yStep2;
                index++;
                vBuf[index].b = 2 + (ui32(double(16384.0 / double(xSegs) * double(xx + 1))) << 2) + yStep1;
                index++;
                vBuf[index].b = 3 + (ui32(double(16384.0 / double(xSegs) * double(xx + 1))) << 2) + yStep2;
                index++;
            }
        bd.Usage = D3D11_USAGE_IMMUTABLE;
        bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
        bd.CPUAccessFlags = 0;
        bd.ByteWidth = strideV1 * totalVerts;
        bd.StructureByteStride = strideV1;
        srd.pSysMem = vBuf;
        hr = dev->CreateBuffer(&bd, &srd, &pVB);
        if (hr != S_OK) ThrowError();
        _aligned_free(vBuf);
    };

    void DrawQuadFromSet1(ui32 offset) {
        offset *= sizeof(VERTEX1) * 4;
        devcon->IASetVertexBuffers(0, 1, &pVB, &strideV1, &offset);
        devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
        devcon->Draw(4, 0);
    };

    void DestroyQuadSet() {
        if (pVB) pVB->Release();
    };
};
It's all functioning as it should, but it just seems like I'm resorting to hacks to achieve my goal. Surely there's a faster way? Using DrawIndexed() consistently dropped the frame-rate by 1% so I switched back to non-indexed Draw calls.
reducing vertex data down to 32bits per vertex is as far as the GPU will allow
You seem to think that vertex buffer sizes are what's holding you back. Make no mistake here, they are not. You have many gigs of VRAM to work with, use them if it will make your code faster. Specifically, anything you're unpacking in your shaders that could otherwise be stored explicitly in your vertex buffer should probably be stored in your vertex buffer.
I am wondering if anyone has experience with using geometry shaders to auto-generate quads
I'll stop you right there, geometry shaders are very inefficient in most driver implementations, even today. They just aren't used that much so nobody bothered to optimize them.
One quick thing that jumps out at me is that you're allocating and freeing your system-side vertex array every frame. Building it is fine, but cache the array; C memory allocation is about as slow as anything is going to get. A quick profiling pass would have shown you that.
Your next biggest problem is that you have a lot of branching in your pixel shader. Use standard functions (like clamp or mix) or blending to let the math cancel out instead of checking for ranges or fully transparent values. Branching will absolutely kill performance.
And lastly, make sure you have the correct hints and usage on your buffers. You don't show them, but they should be set to whatever the equivalent of GL_STREAM_DRAW is, and you need to ensure you don't corrupt the in-flight parts of your vertex buffer. Future frames will render at the same time as the current one as long as you don't invalidate their data by overwriting their vertex buffer, so instead use a round-robin scheme to allow as many vertices as possible to survive (again, use memory for performance). Personally I allocate a very large vertex buffer (5x the data a frame needs) and write it sequentially until I reach the end, at which point I orphan the whole thing and re-allocate it and start from the beginning again.
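In D3D11 terms (the API used in the question) that round-robin scheme looks roughly like the sketch below: a large buffer created with D3D11_USAGE_DYNAMIC and D3D11_CPU_ACCESS_WRITE, appended to with D3D11_MAP_WRITE_NO_OVERWRITE, and orphaned with D3D11_MAP_WRITE_DISCARD once full. The function and variable names are illustrative, not from the question:
#include <d3d11.h>
#include <cstring>

// Sketch: sequential writes into one large dynamic vertex buffer.
// Returns the byte offset to pass to IASetVertexBuffers.
UINT AppendVertices(ID3D11DeviceContext* devcon, ID3D11Buffer* vb, UINT vbBytes,
                    UINT& cursor, const void* verts, UINT bytes)
{
    D3D11_MAP mode = D3D11_MAP_WRITE_NO_OVERWRITE; // append behind the GPU's back
    if (cursor + bytes > vbBytes) {                // out of room:
        mode = D3D11_MAP_WRITE_DISCARD;            // orphan the buffer, start over
        cursor = 0;
    }
    D3D11_MAPPED_SUBRESOURCE ms;
    devcon->Map(vb, 0, mode, 0, &ms);
    std::memcpy(static_cast<char*>(ms.pData) + cursor, verts, bytes);
    devcon->Unmap(vb, 0);
    UINT offset = cursor;
    cursor += bytes;
    return offset;
}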
I think your code is CPU bound. While your approach has very small vertices, you have non-trivial API overhead.
A better approach is rendering all quads with a single draw call. I would probably use instancing for that.
Assuming you want arbitrary per-quad size, position, and orientation in 3D space, here’s one possible approach. Untested.
Vertex buffer elements:
struct sInstanceData
{
    // Center of the quad in 3D space
    XMFLOAT3 center;
    // XY coordinates of the sprite in the atlas
    uint16_t spriteX, spriteY;
    // Local XY vectors of the quad in 3D space
    // length of the vectors = half width/height of the quad
    XMFLOAT3 plusX, plusY;
};
Input layout:
D3D11_INPUT_ELEMENT_DESC desc[ 4 ];
desc[ 0 ] = D3D11_INPUT_ELEMENT_DESC{ "QuadCenter", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 0 };
desc[ 1 ] = D3D11_INPUT_ELEMENT_DESC{ "SpriteIndex", 0, DXGI_FORMAT_R16G16_UINT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 0 };
desc[ 2 ] = D3D11_INPUT_ELEMENT_DESC{ "QuadPlusX", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 0 };
desc[ 3 ] = D3D11_INPUT_ELEMENT_DESC{ "QuadPlusY", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 0 };
Vertex shader:
cbuffer Constants
{
    matrix viewProj;
    // Pass [ 1.0 / xSegs, 1.0 / ySegs ] in that field
    float2 texcoordMul;
};

struct VOut
{
    float3 position : POSITION;
    float3 n : NORMAL;
    float2 texcoord : TEXCOORD;
    float4 pos : SV_Position;
};

VOut main( uint index : SV_VertexID,
           float3 center : QuadCenter, uint2 texcoords : SpriteIndex,
           float3 plusX : QuadPlusX, float3 plusY : QuadPlusY )
{
    VOut result;
    float3 pos = center;
    int2 uv = ( int2 )texcoords;
    // No branches are generated in release builds;
    // only conditional moves are there
    if( index & 1 )
    {
        pos += plusX;
        uv.x++;
    }
    else
        pos -= plusX;
    if( index & 2 )
    {
        pos += plusY;
        uv.y++;
    }
    else
        pos -= plusY;
    result.position = pos;
    result.n = normalize( cross( plusX, plusY ) );
    result.texcoord = ( ( float2 )uv ) * texcoordMul;
    result.pos = mul( float4( pos, 1.0f ), viewProj );
    return result;
}
Rendering:
UINT stride = sizeof( sInstanceData );
UINT off = 0;
context->IASetVertexBuffers( 0, 1, &vb, &stride, &off );
context->IASetPrimitiveTopology( D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
context->DrawInstanced( 4, countQuads, 0, 0 );

How to implement adaptive subdivision algorithm for curve in C

My homework is to write a C program with OpenGL/GLUT which, after getting groups of 4 points by mouse click (points with 3 coordinates), should draw a Bezier curve with an adaptive algorithm. At a theoretical level it's clear how the algorithm works, but I don't know how to put that into C code. I mean that in class we saw that the 4 control points could have a shape similar to a "trapeze", and then the algorithm calculates the two "heights" and checks whether they satisfy a tolerance. The problem is that the user might click anywhere on the screen and the points might not have a trapeze-like shape... so, where can I start from? This is all I have.
This is the code I have written, which is called each time a control point is added:
if (bezierMode == CASTELJAU_ADAPTIVE) {
    glColor3f(0.0f, 0.8f, 0.4f); /* draw adaptive casteljau curve in green */
    for (i = 0; i + 3 < numCV; i += 3)
        adaptiveDeCasteljau3(CV, i, 0.01);
}
void adaptiveDeCasteljau3(float CV[MAX_CV][3], int position, float tolerance) {
    float x01 = (CV[position][0] + CV[position+1][0]) / 2;
    float y01 = (CV[position][1] + CV[position+1][1]) / 2;
    float x12 = (CV[position+1][0] + CV[position+2][0]) / 2;
    float y12 = (CV[position+1][1] + CV[position+2][1]) / 2;
    float x23 = (CV[position+2][0] + CV[position+3][0]) / 2;
    float y23 = (CV[position+2][1] + CV[position+3][1]) / 2;
    float x012 = (x01 + x12) / 2;
    float y012 = (y01 + y12) / 2;
    float x123 = (x12 + x23) / 2;
    float y123 = (y12 + y23) / 2;
    float x0123 = (x012 + x123) / 2;
    float y0123 = (y012 + y123) / 2;
    float dx = CV[3][0] - CV[0][0];
    float dy = CV[3][1] - CV[0][1];
    float d2 = fabs(((CV[1][0] - CV[3][0]) * dy - (CV[1][1] - CV[3][1]) * dx));
    float d3 = fabs(((CV[2][0] - CV[3][0]) * dy - (CV[2][1] - CV[3][1]) * dx));
    if ((d2 + d3) * (d2 + d3) < tolerance * (dx*dx + dy*dy)) {
        glBegin(GL_LINE_STRIP);
        glVertex2f(x0123, y0123);
        glEnd();
        return;
    }
    float tmpLEFT[4][3];
    float tmpRIGHT[4][3];
    tmpLEFT[0][0] = CV[0][0];
    tmpLEFT[0][1] = CV[0][1];
    tmpLEFT[1][0] = x01;
    tmpLEFT[1][1] = y01;
    tmpLEFT[2][0] = x012;
    tmpLEFT[2][1] = y012;
    tmpLEFT[3][0] = x0123;
    tmpLEFT[3][1] = y0123;
    tmpRIGHT[0][0] = x0123;
    tmpRIGHT[0][1] = y0123;
    tmpRIGHT[1][0] = x123;
    tmpRIGHT[1][1] = y123;
    tmpRIGHT[2][0] = x23;
    tmpRIGHT[2][1] = y23;
    tmpRIGHT[3][0] = CV[3][0];
    tmpRIGHT[3][1] = CV[3][1];
    adaptiveDeCasteljau3(tmpLEFT, 0, tolerance);
    adaptiveDeCasteljau3(tmpRIGHT, 0, tolerance);
}
and obviously nothing is drawn. Do you have any idea?
The glBegin / glEnd pair should engulf your whole loop, not sit inside it for each isolated vertex!
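A sketch of that restructuring; it assumes adaptiveDeCasteljau3 is changed so its termination branch only calls glVertex2f(x0123, y0123), with no glBegin/glEnd of its own:
if (bezierMode == CASTELJAU_ADAPTIVE) {
    glColor3f(0.0f, 0.8f, 0.4f);
    for (i = 0; i + 3 < numCV; i += 3) {
        glBegin(GL_LINE_STRIP);
        glVertex2f(CV[i][0], CV[i][1]);     /* start point of this segment */
        adaptiveDeCasteljau3(CV, i, 0.01);  /* emits only the interior vertices */
        glVertex2f(CV[i+3][0], CV[i+3][1]); /* end point of this segment */
        glEnd();
    }
}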

Game project: Explanation Required

I'm working on an OpenGL project and I need a brief explanation of the core components, as I need to explain them to somebody else.
Following is the part of the program
Below are the global variables and header files used in the program:
#include <GL/glut.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#define PI 3.14159265f

GLfloat ballRadius = 0.2, xradius = 0.2, xxradius = 1.0;
GLfloat ballX = 0.0f;
GLfloat ballY = 0.0f;
GLfloat ballXMax, ballXMin, ballYMax, ballYMin;
GLfloat xSpeed = 0.02f;
GLfloat ySpeed = 0.007f;
int refreshMills = 30;
GLfloat angle = 0.0;
int xa, ya;
int flag = 0, flag1 = 0;
int score = 0;
void *currentfont;
GLfloat xo = 0, yo = 0, x, y;
GLdouble clipAreaXLeft, clipAreaXRight, clipAreaYBottom, clipAreaYTop;
void balldisp();
void scoredisp();
This is the reshape function. I need to know what exactly it is doing, and what it is calculating and storing. Confused here.
void reshape(GLsizei width, GLsizei height)
{
    GLfloat aspect = (GLfloat)width / (GLfloat)height;
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    if (width >= height)
    {
        clipAreaXLeft = -1.0 * aspect;
        clipAreaXRight = 1.0 * aspect;
        clipAreaYBottom = -1.0;
        clipAreaYTop = 1.0;
    }
    else
    {
        clipAreaXLeft = -1.0;
        clipAreaXRight = 1.0;
        clipAreaYBottom = -1.0 / aspect;
        clipAreaYTop = 1.0 / aspect;
    }
    gluOrtho2D(clipAreaXLeft, clipAreaXRight, clipAreaYBottom, clipAreaYTop + 0.10);
    ballXMin = clipAreaXLeft + ballRadius;
    ballXMax = clipAreaXRight - ballRadius;
    ballYMin = clipAreaYBottom + ballRadius;
    ballYMax = clipAreaYTop - ballRadius;
}
Below is the code that displays the ball. What is it calculating, and how are the speed and direction set? Confused here.
void balldisp()
{
    glTranslatef(ballX, ballY, 0.0f);
    glBegin(GL_TRIANGLE_FAN);
    color();
    glVertex2f(0.0f, 0.0f);
    int numSegments = 100;
    int i;
    for (i = 0; i <= numSegments; i++)
    {
        angle = i * 2.0f * PI / numSegments;
        glVertex2f(cos(angle) * ballRadius, sin(angle) * ballRadius);
    }
    glEnd();
    ballX += xSpeed;
    ballY += ySpeed;
    if (ballX > ballXMax)
    {
        xa = ballX;
        ballX = ballXMax;
        xSpeed = -xSpeed;
    }
    else if (ballX < ballXMin)
    {
        xa = ballX;
        ballX = ballXMin;
        xSpeed = -xSpeed;
    }
    if (ballY > ballYMax)
    {
        ya = ballY;
        ballY = ballYMax;
        ySpeed = -ySpeed;
    }
    else if (ballY < ballYMin)
    {
        ya = ballY;
        ballY = ballYMin;
        ySpeed = -ySpeed;
    }
}
I want to understand the reshape function and the ball display: what are they doing, and how are things done there?
P.S. The project is about random motion of the ball, which strikes the boundaries of the window and then moves in another direction.
The reshape function is registered with GLUT (using glutReshapeFunc) so that it gets called by GLUT whenever the size of the window changes. Note that placing OpenGL functions for setting the viewport and/or the projection matrix in the reshape function is bad style and should be avoided. All OpenGL drawing related functions (which glViewport and the matrix setup are) belong in the display function.
Similarly, the display function is registered with GLUT (using glutDisplayFunc) so that it gets called by GLUT whenever the window needs to be redrawn (either because it became visible, its contents need refreshing, or a redraw has been requested with glutPostRedisplay).
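For reference, a minimal sketch of how this registration usually looks; the display and timer function names here are assumptions, not taken from the posted code:
int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
    glutCreateWindow("Bouncing ball");
    glutReshapeFunc(reshape);              /* called when the window is resized */
    glutDisplayFunc(display);              /* called whenever a redraw is needed */
    glutTimerFunc(refreshMills, timer, 0); /* drives the animation via glutPostRedisplay */
    glutMainLoop();
    return 0;
}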

Implementing basic Raytracer in C

So I am currently working through a tutorial to implement a very basic raytracer (currently just drawing solid spheres). The tutorial is located here: http://thingsiamdoing.com/intro-to-ray-tracing/
The tutorial is completely language agnostic and deals only in pseudocode. I attempted to convert this pseudocode into C but have run into difficulty. My program compiles fine, yet the output .ppm image file ends with an early EOF error. The lack of information about the problem has left me stuck.
Here is my C code, which is meant to be a direct translation of the pseudocode:
#include <stdio.h>
#include <math.h>

#define WIDTH 512
#define HEIGHT 512

typedef struct {
    float x, y, z;
} vector;

float vectorDot(vector *v1, vector *v2) {
    return v1->x * v2->x + v1->y * v2->y + v1->z * v2->z;
}

void writeppm(char *filename, unsigned char *img, int width, int height) {
    FILE *f;
    f = fopen(filename, "w");
    fprintf(f, "P6 %d %d %d\n", width, height, 255);
    fwrite(img, 3, width * height, f);
    fclose(f);
}

float check_ray(px, py, pz, dx, dy, dz, r) {
    vector v1 = {px, py, pz};
    vector v2 = {dx, dy, dz};
    float det, b;
    b = -vectorDot(&v1, &v2);
    det = b * b - vectorDot(&v1, &v1) + r * r;
    if (det < 0)
        return -1;
    det = sqrtf(det);
    float t1 = b - det;
    float t2 = b + det;
    return t1;
}
int main(void) {
    int img[WIDTH*HEIGHT*3], distToPlane;
    float cameraY, cameraZ, cameraX, pixelWorldX, pixelWorldY, pixelWorldZ, amp, rayX, rayY, rayZ;
    for (int px = 0; px < WIDTH; px++) {
        for (int py = 0; py < HEIGHT; py++) {
            distToPlane = 100;
            pixelWorldX = distToPlane;
            pixelWorldY = (px - WIDTH / 2) / WIDTH;
            pixelWorldZ = (py - HEIGHT / 2) / WIDTH;
            rayX = pixelWorldX - cameraX;
            rayY = pixelWorldY - cameraY;
            rayZ = pixelWorldZ - cameraZ;
            amp = 1 / sqrtf(rayX*rayX + rayY*rayY + rayZ*rayZ);
            rayX *= amp;
            rayY *= amp;
            rayZ *= amp;
            if (check_ray(50, 50, 50, rayX, rayY, rayZ, 50)) {
                img[(py + px*WIDTH)*3 + 0] = 0;
                img[(py + px*WIDTH)*3 + 1] = 0;
                img[(py + px*WIDTH)*3 + 2] = 128;
            }
            else {
                img[(py + px*WIDTH)*3 + 0] = 255;
                img[(py + px*WIDTH)*3 + 1] = 255;
                img[(py + px*WIDTH)*3 + 2] = 255;
            }
        }
    }
    writeppm("image.ppm", "img", WIDTH, HEIGHT);
}
I am fairly confident the error does not lie with my function for writing the .ppm file, as I have used it for other work and it has been fine.
You may want to remove the quotes from around "img" in the following line of code:
writeppm("image.ppm", "img", WIDTH, HEIGHT);
seeing as how its prototype is void writeppm(char *, unsigned char *, int, int), although I am surprised that your compiler didn't at least give you a warning about a type mismatch.
Also, for the record, I would suggest putting some error checking code (like checking the return value of fwrite, or checking the return value of fopen)--- but that's just me.
Also, if you are not already, please compile with all warnings enabled (e.g. with gcc use -ansi -Wall -pedantic); this will help you catch type mismatches and other little gotchas.
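For example, writeppm with the suggested error checks might look like this (a sketch; opening in binary mode "wb" is also advisable for P6 pixel data on Windows):
void writeppm(char *filename, unsigned char *img, int width, int height)
{
    FILE *f = fopen(filename, "wb");  /* binary mode: don't translate bytes */
    if (f == NULL) {
        perror(filename);             /* say why the open failed */
        return;
    }
    fprintf(f, "P6 %d %d %d\n", width, height, 255);
    if (fwrite(img, 3, (size_t)width * height, f) != (size_t)width * height)
        fprintf(stderr, "short write to %s\n", filename);
    fclose(f);
}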
I see two errors in main
int img[WIDTH*HEIGHT*3];
...
writeppm("image.ppm", "img", WIDTH, HEIGHT);
should be
unsigned char img[WIDTH*HEIGHT*3];
...
writeppm("image.ppm", img, WIDTH, HEIGHT);

Using ROI from camera feed as Template for cvMatchTemplate

This is code that I'm rewriting; I wrote it successfully before.
It's supposed to use an ROI from a webcam and match it with cvMatchTemplate against other webcam frames. I took out the trackbars and windows to keep it short per the guidelines, but in the original you could move the trackbars to select a section of the frame in the top-left window, and in the bottom-left window you saw your template.
Here is a picture of what it looked like:
http://i983.photobucket.com/albums/ae313/edmoney777/Screenshotfrom2013-10-21112021_zpsae11e3f0.png
Here is the error I'm getting.
I tried changing the depth of src to 32F with no luck... I read templmatch.cpp line 384, which the error message pointed me to, but found no help there:
OpenCV Error: Assertion failed (result.size() == cv::Size(std::abs
(img.cols - templ.cols) + 1, std::abs(img.rows - templ.rows) + 1)
&& result.type() == CV_32F) in cvMatchTemplat
I'm new to OpenCV and could use a little help debugging the code below.
#include <cv.h>
#include <highgui.h>

using namespace std;

int main() {
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(0);
    if (!capture) {
        printf("Capture failure\n");
        return -1;
    }
    IplImage* frame = 0;
    double width = 640.0;
    double height = 480.0;
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, width);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, height);
    while (true) {
        frame = cvQueryFrame(capture);
        if (!frame) break;
        frame = cvCloneImage(frame);
        IplImage *src, *templ, *ftmp[6]; // ftmp will hold results
        int i;
        CvRect roi;
        int rectx = 0;
        int recty = 0;
        int rectwidth = frame->width / 10;
        int rectheight = frame->height / 10;
        IplImage* img = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        // Read in the source image to be searched
        src = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        roi = cvRect(rectx, recty, rectwidth, rectheight);
        img = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        src = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        cvCopy(frame, img);
        cvSetImageROI(frame, roi);
        cvShowImage("b", img);
        cvReleaseImage(&img);
        // Allocate Output Images:
        int iwidth = src->width - frame->width + 1;
        int iheight = src->height - frame->height + 1;
        for (i = 0; i < 6; ++i) {
            ftmp[i] = cvCreateImage(cvSize(iwidth, iheight), 32, 1);
        }
        // Do the matching of the template with the image
        for (i = 0; i < 6; ++i) {
            cvMatchTemplate(src, frame, ftmp[i], i);
            cvNormalize(ftmp[i], ftmp[i], 1, 0, CV_MINMAX);
        }
        // DISPLAY
        cvReleaseImage(&src);
        cvResetImageROI(frame);
        cvReleaseImage(&frame);
        // Wait 50 ms
        int c = cvWaitKey(10);
        // If 'ESC' is pressed, break the loop
        if ((char)c == 27) break;
    }
    cvDestroyAllWindows();
    cvReleaseCapture(&capture);
    return 0;
}
I am new to OpenCV and really don't know what to do with this error message. Anyone have an idea/pointer on what to do? Your help is very much appreciated! Cheers,
As you are performing template matching on the selected ROI of the image, you have to create the output images according to the size of ROI.
// Allocate Output Images:
int iwidth = src->width - rectwidth + 1;
int iheight = src->height - rectheight + 1;
Your code contains memory leaks which may crash the program, e.g. all 6 images of ftmp are allocated in each iteration but never released. Either release the images at the end of each iteration, or create them only once before the while loop, as in the sketch below.
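A sketch of the allocate-once variant, using the corrected ROI-based sizes from above; it assumes the camera resolution, and therefore the template size, is fixed after the first frame:
// Sketch: allocate the six result images once, sized from the first frame.
IplImage* first = cvQueryFrame(capture); // do not release: owned by the capture
int rectwidth = first->width / 10;
int rectheight = first->height / 10;
int iwidth = first->width - rectwidth + 1;
int iheight = first->height - rectheight + 1;
IplImage* ftmp[6];
for (int i = 0; i < 6; ++i)
    ftmp[i] = cvCreateImage(cvSize(iwidth, iheight), IPL_DEPTH_32F, 1);
// ... run the capture loop, reusing ftmp[0..5] every frame ...
for (int i = 0; i < 6; ++i)
    cvReleaseImage(&ftmp[i]);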
Also, OpenCV documentation explicitly states not to modify the frame returned by cvQueryFrame. So you may consider removing cvReleaseImage(&frame);. Check this answer for details.
