Using ROI from camera feed as Template for cvMatchTemplate - c

This is code that I'm rewriting; I wrote it successfully before.
It's supposed to take an ROI from a webcam frame and match it with cvMatchTemplate against other webcam frames. I took out the trackbars and windows to keep the code short, per guidelines, but in the original you could move the trackbars to select a section of the frame in the top-left window, and the bottom-left window showed your template.
Here is a picture of what it looked like:
http://i983.photobucket.com/albums/ae313/edmoney777/Screenshotfrom2013-10-21112021_zpsae11e3f0.png
Here is the error I'm getting. I tried changing the depth of src to 32F with no luck, and I read templmatch.cpp line 384, which the error message pointed me to, but found no help there:
OpenCV Error: Assertion failed (result.size() == cv::Size(std::abs
(img.cols - templ.cols) + 1, std::abs(img.rows - templ.rows) + 1)
&& result.type() == CV_32F) in cvMatchTemplat
I'm new to OpenCV and could use a little help debugging the code below.
#include <cv.h>
#include <highgui.h>

using namespace std;

int main(){
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(0);
    if(!capture){
        printf("Capture failure\n");
        return -1;
    }

    IplImage* frame = 0;
    double width = 640.0;
    double height = 480.0;
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, width);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, height);

    while(true){
        frame = cvQueryFrame(capture);
        if(!frame) break;
        frame = cvCloneImage(frame);

        IplImage *src, *templ, *ftmp[6]; // ftmp will hold results
        int i;

        CvRect roi;
        int rectx = 0;
        int recty = 0;
        int rectwidth = frame->width / 10;
        int rectheight = frame->height / 10;

        IplImage* img = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);

        // Read in the source image to be searched
        src = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);

        roi = cvRect(rectx, recty, rectwidth, rectheight);
        img = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        src = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        cvCopy(frame, img);
        cvSetImageROI(frame, roi);
        cvShowImage( "b", img );
        cvReleaseImage(&img);

        // Allocate Output Images:
        int iwidth = src->width - frame->width + 1;
        int iheight = src->height - frame->height + 1;
        for(i = 0; i < 6; ++i){
            ftmp[i] = cvCreateImage( cvSize( iwidth, iheight ), 32, 1 );
        }

        // Do the matching of the template with the image
        for( i = 0; i < 6; ++i ){
            cvMatchTemplate( src, frame, ftmp[i], i );
            cvNormalize( ftmp[i], ftmp[i], 1, 0, CV_MINMAX );
        }

        // DISPLAY
        cvReleaseImage(&src);
        cvResetImageROI(frame);
        cvReleaseImage(&frame);

        //Wait 50mS
        int c = cvWaitKey(10);
        //If 'ESC' is pressed, break the loop
        if((char)c == 27) break;
    }

    cvDestroyAllWindows();
    cvReleaseCapture(&capture);
    return 0;
}
I am new to OpenCV and really don't know what to do with this error message. Does anyone have an idea or a pointer? Your help is very much appreciated! Cheers,

As you are performing template matching on the selected ROI of the image, you have to create the output images according to the size of the ROI.
// Allocate Output Images:
int iwidth = src->width - rectwidth + 1;
int iheight = src->height - rectheight + 1;
Your code contains memory leaks which may crash the program, e.g. all 6 images of ftmp are allocated in each iteration but never released. Either release the images at the end of each iteration, or create them only once before the while loop.
Also, the OpenCV documentation explicitly states not to modify the frame returned by cvQueryFrame, so you may consider removing cvReleaseImage(&frame);. Check this answer for details.
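A minimal sketch pulling those two fixes together, assuming a fixed 640x480 capture and the rectwidth x rectheight ROI from the question (variable names follow the code above):

// Sketch: allocate the result images ONCE, before the while loop,
// sized from the ROI (the template), not from the full frame.
int iwidth  = 640 - rectwidth  + 1;   // src->width  - template width  + 1
int iheight = 480 - rectheight + 1;   // src->height - template height + 1
IplImage* ftmp[6];
for(int i = 0; i < 6; ++i)
    ftmp[i] = cvCreateImage(cvSize(iwidth, iheight), IPL_DEPTH_32F, 1);

// Inside the loop: src must hold the full frame to search
// (e.g. cvCopy(frame, src) before the ROI is set), then the ROI
// makes `frame` act as the template.
cvSetImageROI(frame, roi);
for(int i = 0; i < 6; ++i){
    cvMatchTemplate(src, frame, ftmp[i], i);   // result is CV_32F, (W-w+1) x (H-h+1)
    cvNormalize(ftmp[i], ftmp[i], 1, 0, CV_MINMAX);
}
cvResetImageROI(frame);

// After the loop: release the result images once.
for(int i = 0; i < 6; ++i)
    cvReleaseImage(&ftmp[i]);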


libjpeg reads a scanline 1/8th the original width

I am trying to read every pixel in a JPEG image. When I read a scanline, it appears that the image has been squashed to one eighth of its original size. The scanline is the correct width, but the remaining 7/8ths of the scanline is not filled (0, 0, 0).
I can't simply use each pixel 8 times, since a large amount of information was lost.
I don't use any decompression parameters and am perfectly happy with the defaults. The libjpeg version I am using is "libjpeg/9d" from the https://conan.io package center.
How can I get scanlines in the correct aspect ratio?
FILE* file_p = openfile(fileaddress);

struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr err; // the error handler
cinfo.err = jpeg_std_error(&err);

jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, file_p);
int result = jpeg_read_header(&cinfo, TRUE);
bool startedfine = jpeg_start_decompress(&cinfo);

int row_stride = cinfo.output_width * cinfo.output_components;
image output = create_image(cinfo.output_width, cinfo.output_height);
JSAMPARRAY buffer = calloc(1, sizeof(JSAMPROW*));
buffer[0] = calloc(1, sizeof(JSAMPROW) * row_stride);

while (cinfo.output_scanline < cinfo.output_height) {
    int current_y = cinfo.output_scanline;
    jpeg_read_scanlines(&cinfo, buffer, 1);
    JSAMPROW row = buffer[0];
    for (int row_i = 0; row_i < row_stride; row_i += 3) {
        int r = (int)row[row_i];
        int g = (int)row[row_i + 1];
        int b = (int)row[row_i + 2];
        int actual_x = row_i / 3;
        output.pixels_array_2d[actual_x][current_y].r = r;
        output.pixels_array_2d[actual_x][current_y].g = b;
        output.pixels_array_2d[actual_x][current_y].b = g;
    }
}

free(buffer[0]);
free(buffer);

jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(file_p);
As a side note, you may notice I assign the blue value to the green value when copying the pixels. This is because without it, my test image is the wrong colour and I don't know why.
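One thing worth checking (a diagnostic sketch, not a fix, since the cause isn't visible in the snippet above): libjpeg only fills row_stride = output_width * output_components bytes per scanline, so if the file decodes to something other than 3-component RGB, the per-pixel stride of 3 in the loop above no longer matches the data. Printing the fields libjpeg settles on after jpeg_start_decompress() makes any mismatch obvious:

/* Diagnostic sketch: dump what libjpeg actually decided to output.
   All of these fields are valid after jpeg_start_decompress(). */
printf("output_width      = %u\n", cinfo.output_width);
printf("output_height     = %u\n", cinfo.output_height);
printf("output_components = %d\n", cinfo.output_components); /* 3 = RGB, 1 = grayscale, 4 = CMYK */
printf("out_color_space   = %d\n", (int)cinfo.out_color_space);
printf("scale             = %u/%u\n", cinfo.scale_num, cinfo.scale_denom); /* defaults to 1/1 */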

Issue displaying IDirect3DTexture8 after backporting from IDirect3DTexture9

I'm trying to backport someone's Direct3D 9 port of Quake 1 by id Software to Direct3D 8 so I can port it to the original Xbox (which only supports the D3D8 API).
After making the changes to use Direct3D 8, it displays some mashed-up pixels on the screen that appear to be in little squares (see pictures).
Does anyone know what's gone wrong here? It works flawlessly with D3D9. Is there some extra argument required for D3D8 that I'm missing, rect pitch maybe?
The data being passed in is a Quake 1 .lmp 2D image file: "It consists of two integers (width and height) followed by a string of width x height bytes, each of which is an index into the Quake palette."
It's being passed to the D3D_ResampleTexture() function.
Any help would be much appreciated.
Image output using D3D8
Image output using D3D9
The code:
void D3D_ResampleTexture (image_t *src, image_t *dst)
{
    int y, x, srcpos, srcbase, dstpos;
    unsigned int *dstdata, *srcdata;

    // take an unsigned pointer to the dest data that we'll actually fill
    dstdata = (unsigned int *) dst->data;

    // easier access to src data for 32 bit resampling
    srcdata = (unsigned int *) src->data;

    // nearest neighbour for now
    for (y = 0, dstpos = 0; y < dst->height; y++)
    {
        srcbase = (y * src->height / dst->height) * src->width;

        for (x = 0; x < dst->width; x++, dstpos++)
        {
            srcpos = srcbase + (x * src->width / dst->width);

            if (src->flags & IMAGE_32BIT)
                dstdata[dstpos] = srcdata[srcpos];
            else if (src->palette)
                dstdata[dstpos] = src->palette[src->data[srcpos]];
            else
                Sys_Error ("D3D_ResampleTexture: !(flags & IMAGE_32BIT) without palette set");
        }
    }
}

void D3D_LoadTextureStage3 (LPDIRECT3DTEXTURE8/*9*/ *tex, image_t *image)
{
    int i;
    image_t scaled;
    D3DLOCKED_RECT LockRect;

    memset (&LockRect, 0, sizeof(D3DLOCKED_RECT));

    // check scaling here first
    for (scaled.width = 1; scaled.width < image->width; scaled.width *= 2);
    for (scaled.height = 1; scaled.height < image->height; scaled.height *= 2);

    // clamp to max texture size
    if (scaled.width > /*d3d_DeviceCaps.MaxTextureWidth*/640) scaled.width = /*d3d_DeviceCaps.MaxTextureWidth*/640;
    if (scaled.height > /*d3d_DeviceCaps.MaxTextureHeight*/480) scaled.height = /*d3d_DeviceCaps.MaxTextureHeight*/480;

    IDirect3DDevice8/*9*/_CreateTexture(d3d_Device, scaled.width, scaled.height,
        (image->flags & IMAGE_MIPMAP) ? 0 : 1,
        /*(image->flags & IMAGE_MIPMAP) ? D3DUSAGE_AUTOGENMIPMAP :*/ 0,
        (image->flags & IMAGE_ALPHA) ? D3DFMT_A8R8G8B8 : D3DFMT_X8R8G8B8,
        D3DPOOL_MANAGED,
        tex
    );

    // lock the texture rectangle
    //(*tex)->LockRect (0, &LockRect, NULL, 0);
    IDirect3DTexture8/*9*/_LockRect(*tex, 0, &LockRect, NULL, 0);

    // fill it in - how we do it depends on the scaling
    if (scaled.width == image->width && scaled.height == image->height)
    {
        // no scaling
        for (i = 0; i < (scaled.width * scaled.height); i++)
        {
            unsigned int p;

            // retrieve the correct texel - this will either be direct or a palette lookup
            if (image->flags & IMAGE_32BIT)
                p = ((unsigned *) image->data)[i];
            else if (image->palette)
                p = image->palette[image->data[i]];
            else
                Sys_Error ("D3D_LoadTexture: !(flags & IMAGE_32BIT) without palette set");

            // store it back
            ((unsigned *) LockRect.pBits)[i] = p;
        }
    }
    else
    {
        // save out lockbits in scaled data pointer
        scaled.data = (byte *) LockRect.pBits;

        // resample data into the texture
        D3D_ResampleTexture (image, &scaled);
    }

    // unlock it
    //(*tex)->UnlockRect (0);
    IDirect3DTexture8/*9*/_UnlockRect(*tex, 0);

    // tell Direct 3D that we're going to be needing to use this managed resource shortly
    //FIXME
    //(*tex)->PreLoad ();
}

LPDIRECT3DTEXTURE8/*9*/ D3D_LoadTextureStage2 (image_t *image)
{
    d3d_texture_t *tex;

    // look for a match
    // create a new one
    tex = (d3d_texture_t *) malloc (sizeof (d3d_texture_t));

    // link it in
    tex->next = d3d_Textures;
    d3d_Textures = tex;

    // fill in the struct
    tex->LastUsage = 0;
    tex->d3d_Texture = NULL;

    // copy the image
    memcpy (&tex->TexImage, image, sizeof (image_t));

    // upload through direct 3d
    D3D_LoadTextureStage3 (&tex->d3d_Texture, image);

    // return the texture we got
    return tex->d3d_Texture;
}

LPDIRECT3DTEXTURE8/*9*/ D3D_LoadTexture (char *identifier, int width, int height, byte *data, /*bool*/qboolean mipmap, /*bool*/qboolean alpha)
{
    image_t image;

    image.data = data;
    image.flags = 0;
    image.height = height;
    image.width = width;
    image.palette = d_8to24table;

    strcpy (image.identifier, identifier);

    if (mipmap) image.flags |= IMAGE_MIPMAP;
    if (alpha) image.flags |= IMAGE_ALPHA;

    return D3D_LoadTextureStage2 (&image);
}
When you lock the texture, you have to observe the returned Pitch member of the D3DLOCKED_RECT structure. Your code assumes all the data is contiguous, but the Pitch can be larger than the width of a scanline, to allow for locking a subregion and for buffer layouts where the end of one scanline is not contiguous with the beginning of the next.
Look at Chapter 4 of my book "The Direct3D Graphics Pipeline" to see an example of accessing a surface and using the Pitch properly.
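A minimal sketch of a pitch-aware version of the unscaled copy path, assuming the same image_t fields as in the question; the row pointer advances by Pitch bytes per row rather than by width pixels:

// Pitch-aware fill (sketch, unscaled path): Pitch is in BYTES, so step the
// row pointer by Pitch, not by scaled.width * 4.
byte *rowstart = (byte *) LockRect.pBits;

for (int y = 0; y < scaled.height; y++)
{
    unsigned int *row = (unsigned int *) rowstart;

    for (int x = 0; x < scaled.width; x++)
    {
        int i = y * scaled.width + x;
        unsigned int p;

        // direct texel or palette lookup, as in the original code
        if (image->flags & IMAGE_32BIT)
            p = ((unsigned *) image->data)[i];
        else
            p = image->palette[image->data[i]];

        row[x] = p;
    }

    rowstart += LockRect.Pitch;   // next scanline in the locked surface
}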
For anyone else who comes across this issue: it was due to the way the image was being loaded into the Xbox's memory; it needed to be swizzled.

OpenGL ios 6 screenshot

I am trying to get a screenshot of a NinevehGL object (NinevehGL is a 3D library built on top of OpenGL), so I think OpenGL commands should work here as well.
The problem is that the screenshot works fine on iOS 5 and earlier, but not on iOS 6. It also works fine in the iOS 6 simulator, but not on an iOS 6 device, where it gives me a black background. I have read many posts, most of which suggest putting
CAEAGLLayer *eaglLayer = (CAEAGLLayer *) [self theNGLView].layer;
[eaglLayer setDrawableProperties:@{
    kEAGLDrawablePropertyRetainedBacking: [NSNumber numberWithBool:YES],
    kEAGLDrawablePropertyColorFormat: kEAGLColorFormatRGBA8
}];
in the init method, which I did, but with no luck.
I am using the method below to get the screenshot.
- (UIImage *)getImage{
    GLint width;
    GLint height;

    glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &width);
    glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &height);
    NSLog(@"%d %d", width, height);

    NSInteger myDataLength = width * height * 4;

    // allocate array and read pixels into it.
    GLubyte *buffer = (GLubyte *) malloc(myDataLength);
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);

    // gl renders "upside down" so swap top to bottom into new array.
    // there's gotta be a better way, but this works.
    GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
    for(int y = 0; y < height; y++)
    {
        for(int x = 0; x < width * 4; x++)
        {
            buffer2[((height - 1) - y) * width * 4 + x] = buffer[y * 4 * width + x];
        }
    }

    // make data provider with data.
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);

    // prep the ingredients
    int bitsPerComponent = 8;
    int bitsPerPixel = 32;
    int bytesPerRow = 4 * width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;

    // make the cgimage
    CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);

    // then make the uiimage from that
    UIImage *myImage = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);
    free(buffer);
    free(buffer2);

    return myImage;
}
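For what it's worth, one frequently cited cause on iOS 6 (an assumption here, since I can't verify it against NinevehGL's internals): the renderbuffer contents may no longer be preserved after -presentRenderbuffer: unless retained backing is on, so glReadPixels has to run while the just-rendered framebuffer is still bound, before the frame is presented. A sketch, where framebuffer is a placeholder name for whatever FBO the library renders into:

// Sketch: grab pixels BEFORE [context presentRenderbuffer:GL_RENDERBUFFER] runs.
// `framebuffer` is a hypothetical handle to the FBO that was just rendered.
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);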

How to find corner coordinates of Image contour in Opencv and C

I have to find the corner points of an image so that I can crop it to a rectangular shape. I have already found the contour and used cvApproxPoly() on it. Now how do I find the corner coordinates of the contour?
Here is my C code:
#include <cv.h>
#include <highgui.h>

int main(int argc, char** argv)
{
    IplImage *img, *gray;

    if((img = cvLoadImage("save.jpg", 1)) == NULL)
    {
        printf("A Img open error\n");
    }

    gray = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
    cvCvtColor(img, gray, CV_BGR2GRAY);

    IplImage* out_median = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
    cvSmooth( gray, out_median, CV_MEDIAN, 3);

    IplImage* out_threshold = cvCreateImage( cvGetSize(out_median), out_median->depth, 1);
    cvThreshold(out_median, out_threshold, 1, 255, CV_THRESH_BINARY);

    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* first_contour = NULL;
    cvFindContours(out_threshold, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

    CvSeq* approx_polygon = NULL;
    approx_polygon = cvApproxPoly(first_contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 0.01*cvArcLength(first_contour, CV_WHOLE_SEQ, 1), 0);

    //cvDrawContours(out_threshold, approx_polygon, cvScalarAll(255), cvScalarAll(255), 100);
    //cvShowImage("Contours", out_threshold );
    //cvSaveImage("save_approxpoly_contour.jpg", out_threshold);

    cvWaitKey(0);
    return 0;
}
This is my contour image after applying cvApproxPoly():
I recommend using cvBlob:
Get the blobs:
cvThreshold(&src, &dst, 10, 255, CV_THRESH_BINARY);
IplImage *labelImg = cvCreateImage(cvGetSize(&dst),IPL_DEPTH_LABEL,1);
CvBlobs blobs;
unsigned int result = cvLabel(&dst, labelImg, blobs);
If you have more than one blob, find the combined limits:
int borders[4];
borders[0] = 99999; //left
borders[1] = 99999; //top
borders[2] = 0;     //right
borders[3] = 0;     //bottom

for (CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it)
{
    if(((*it).second->minx) < borders[0]){
        borders[0] = (*it).second->minx;
    }
    if(((*it).second->miny) < borders[1]){
        borders[1] = (*it).second->miny;
    }
    if(((*it).second->maxx) > borders[2]){
        borders[2] = (*it).second->maxx;
    }
    if(((*it).second->maxy) > borders[3]){
        borders[3] = (*it).second->maxy;
    }
}
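From those limits the crop itself is one ROI away; a minimal sketch, assuming img is the original IplImage loaded in the question:

// Sketch: crop the original image to the combined blob bounding box.
CvRect crop = cvRect(borders[0], borders[1],
                     borders[2] - borders[0],   // width  = right - left
                     borders[3] - borders[1]);  // height = bottom - top

cvSetImageROI(img, crop);
IplImage* cropped = cvCreateImage(cvGetSize(img), img->depth, img->nChannels); // cvGetSize honours the ROI
cvCopy(img, cropped, NULL);
cvResetImageROI(img);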

Tracking objects using histogram data in OpenCV

I am trying to track objects inside an image using histogram data from the object. I pass in a reference image to get the histogram data and store it in a Mat. From there I load in an image and try to use the histogram data to detect the object. The problem I am running into is that not only is it not tracking the object, it's not updating the detection either. If I load image "1.jpg", the detection claims the object is in the top right corner when it's actually in the bottom left. When I pass in the second image, the detection field does not move at all. This continues for the next batch of images as well. Below is a code snippet from my application.
This is being done in a Windows 7 32-bit environment using OpenCV 2.3 in VS2010. Thanks in advance for any help.
int main( int argc, char** argv )
{
    vector<string> szFileNames;
    IplImage* Image;
    Mat img, hist, backproj;
    Rect trackWindow;

    // Load histogram data
    hist = ImageHistogram("C:/Users/seb/Documents/redbox1.jpg", backproj);

    Image = cvLoadImage("C:/Users/seb/Documents/1.jpg");
    img = Mat(Image);

    trackWindow = Rect(0, 0, Image->width, Image->height);

    imshow("Histogram", hist);

    while(true)
    {
        Detection(img, backproj, trackWindow);
        imshow("Image", img);

        char c = cvWaitKey(1);
        switch(c)
        {
            case 32:
            {
                cvReleaseImage(&Image);
                Image = cvLoadImage("C:/Users/seb/Documents/redbox2.jpg");
                img = Mat(Image);
                break;
            }
        }
    }

    cvReleaseImage(&Image);

    // Destroy all windows
    cvDestroyWindow("Histogram");
    cvDestroyWindow("Image");

    return 0;
}

Mat ImageHistogram(string szFilename, Mat& backproj)
{
    // Create histogram values
    int vmin = 10;
    int vmax = 256;
    int smin = 30;
    int hsize = 16;
    float hranges[] = {0, 180};
    const float* phranges = hranges;

    // Load the image
    IplImage* Image = cvLoadImage(szFilename.c_str());
    Rect rect = Rect(0, 0, Image->width, Image->height);

    // Convert Image to a matrix
    Mat ImageMat = Mat(Image);

    // Create and initialize the Histogram
    Mat hsv, mask, hue, hist, histimg = Mat::zeros(200, 320, CV_8UC3);
    cvtColor(ImageMat, hsv, CV_BGR2HSV);

    // Create and adjust the histogram values
    inRange(hsv, Scalar(0, smin, vmin), Scalar(180, 256, vmax), mask);

    int ch[] = {0, 0};
    hue.create(hsv.size(), hsv.depth());
    mixChannels(&hsv, 1, &hue, 1, ch, 1);

    Mat roi(hue, rect), maskroi(mask, rect);
    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
    normalize(hist, hist, 0, 255, CV_MINMAX);

    histimg = Scalar::all(0);
    int binW = histimg.cols / hsize;
    Mat buf(1, hsize, CV_8UC3);
    for( int i = 0; i < hsize; i++ )
        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
    cvtColor(buf, buf, CV_HSV2BGR);

    for( int i = 0; i < hsize; i++ )
    {
        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
        rectangle( histimg, Point(i*binW, histimg.rows),
                   Point((i+1)*binW, histimg.rows - val),
                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
    }

    calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
    backproj &= mask;

    cvReleaseImage(&Image);

    return histimg;
}

void Detection(Mat& image, Mat& backproj, Rect& trackWindow)
{
    RotatedRect trackBox = CamShift(backproj, trackWindow, TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
    int test2 = trackWindow.area();

    if(trackBox.size.height > 0 && trackBox.size.width > 0)
    {
        if( trackWindow.area() <= 1 )
        {
            int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
            trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                               trackWindow.x + r, trackWindow.y + r) &
                          Rect(0, 0, cols, rows);
        }

        int test = trackBox.size.area();
        if(test >= 1)
        {
            rectangle(image, trackBox.boundingRect(), Scalar(255,0,0), 3, CV_AA);
            ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
        }
    }
}
I've figured out the issue. It came down to me not converting the image I was searching: I had taken histogram data from my coloured reference box, but I also had to compute the histogram back projection from each image I was searching.
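A minimal sketch of that fix, assuming the 1-D hue histogram built in ImageHistogram is kept around (note that in the code above ImageHistogram returns only the histimg visualization, so the actual hist Mat would need to be passed out as well, e.g. by reference):

// Sketch: recompute the back projection for every new search image,
// reusing the reference histogram `hist` computed once from the red box.
void UpdateBackproj(const Mat& searchImg, const Mat& hist, Mat& backproj)
{
    float hranges[] = {0, 180};
    const float* phranges = hranges;
    int ch[] = {0, 0};

    Mat hsv, hue;
    cvtColor(searchImg, hsv, CV_BGR2HSV);   // same colour space as the reference
    hue.create(hsv.size(), hsv.depth());
    mixChannels(&hsv, 1, &hue, 1, ch, 1);   // extract the hue channel

    calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
}

// Usage after each image swap in main():
//   UpdateBackproj(img, hist, backproj);
//   Detection(img, backproj, trackWindow);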
