Webcam doesn't work with openCV 1.0 C (CodeBlocks) - c

I have a problem: I want a window showing what my webcam captures, but all I get is a window with a black image.
I tried to get the image dimensions, but they come back as 0x0.
My webcam itself is fine — it displays correctly in Windows' own camera software.
Here is my code:
(Windows 7 x64 bits)
#include <iostream>
#include <cv.h>
#include <highgui.h>
using namespace std;
int main()
{
// Frame buffer filled by the capture device. The pointer returned by
// cvRetrieveFrame() is owned by the capture, so it must never be passed
// to cvReleaseImage().
IplImage* img;
// Open the first available camera (CV_CAP_ANY lets HighGUI pick the backend).
CvCapture* capture = cvCaptureFromCAM (CV_CAP_ANY);
if (!capture)
return 10; // camera could not be opened
cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
char key = 'a';
// Prime the capture with a first frame; bail out if the camera delivers nothing.
if (!cvGrabFrame(capture))
return 20;
while (key != 'q'){
// Decode the frame grabbed on the previous iteration.
img = cvRetrieveFrame(capture);
cvShowImage("video", img);
// Pump the HighGUI event loop for 60 ms - required for the window to repaint.
key = cvWaitKey(60);
// Grab the next frame; stop the loop when the stream ends.
if (!cvGrabFrame(capture))
key = 'q';
}
cvDestroyAllWindows();
img = NULL; // the frame belongs to the capture - just drop the pointer
cvReleaseCapture(&capture);
return 0;
}

Related

I am getting fatal error when i run my code OpenCV code using OpenMP(C/C++)

I want to parallelize the execution of this program (OpenCV parallelization with OpenMP using C), but when I run it I get a fatal error. Your help will be appreciated — I am new to both OpenMP and OpenCV.
#include <cv.h>
#include <cvaux.h>
#include <iostream>
#include <cxcore.h>
#include <highgui.h>
#include <omp.h>
using namespace cv;
using namespace std;
int main(int argc, char **argv)
{
// Purpose: open the same video file twice and display, concurrently in two
// OpenMP sections, the first frame and the middle frame of the clip.
int no_of_frames = 0;
int fps = 0;
int mid_frame = 0;
CvCapture* capture = cvCaptureFromFile("/home/nagaraj/ImageVideo/tunnel.avi");
CvCapture* captureNew = cvCaptureFromFile("/home/nagaraj/ImageVideo/tunnel.avi");
if (capture == NULL)
{
printf("Error: Can't open video.\n");
return -1;
}
// BUG FIX: the original checked `capture` twice; `captureNew` was never
// validated, so a failed open crashed later inside the parallel section.
if (captureNew == NULL)
{
printf("Error: Can't open video.\n");
cvReleaseCapture(&capture);
return -1;
}
fps = (int)cvGetCaptureProperty(capture,CV_CAP_PROP_FPS);
no_of_frames = (int)cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_COUNT);
// These properties can legitimately come back as 0 for some containers;
// print them and guard before seeking, otherwise cvSetCaptureProperty()
// receives a bogus frame index.
printf("fps=%d frames=%d\n", fps, no_of_frames);
if (no_of_frames <= 0)
{
printf("Error: Can't query frame count.\n");
cvReleaseCapture(&capture);
cvReleaseCapture(&captureNew);
return -1;
}
mid_frame = no_of_frames/2;
omp_set_num_threads(2);
#pragma omp parallel sections
{
#pragma omp section
{
IplImage* img = cvQueryFrame(capture);
// cvQueryFrame() may return NULL on a decode error; passing a NULL
// image to cvShowImage() would crash HighGUI.
if (img != NULL)
{
cvNamedWindow("Window1",CV_WINDOW_AUTOSIZE);
cvShowImage("Window1",img);
cvWaitKey(10000);
cvDestroyWindow("Window1");
}
// BUG FIX: do NOT cvReleaseImage() a frame obtained from
// cvQueryFrame() - it is owned by the capture and is freed by
// cvReleaseCapture(); double-freeing it crashes the program.
cvReleaseCapture(&capture);
}
#pragma omp section
{
cvSetCaptureProperty(captureNew,CV_CAP_PROP_POS_FRAMES,(double)mid_frame-1);
IplImage* img = cvQueryFrame(captureNew);
if (img != NULL)
{
cvNamedWindow("Window2",CV_WINDOW_AUTOSIZE);
cvShowImage("Window2",img);
cvWaitKey(10000);
cvDestroyWindow("Window2");
}
cvReleaseCapture(&captureNew);
}
}
return 0;
}
There are several potential problems in the code:
The most probable cause is cvReleaseImage() being called to free a resource that wasn't created by you. In this case, the IplImage was created internally by cvQueryFrame(). Doing this can crash your application.
Print the values of fps, no_of_frames and mid_frame to the screen. The functions that are called to retrieve these values can fail and return 0, which would then cause a problem in your application since you pass some of these data to cvSetCaptureProperty().
Sometimes cvQueryFrame() can return NULL indicating that there was some problem while retrieving a frame. Passing a NULL frame to cvShowImage() can also crash your application:
IplImage* img = cvQueryFrame(capture);
if (img == NULL) {
// handle error
}

I want to display output image in different windows in OpenCV

I want to display the same image multiple times in different windows, for which I have used a for loop but I am getting only one window display. Can anybody provide me any suggestion on how to display output images in multiple windows? Following is the code in OpenCV with C API. Here, I am simply loading an image from argv[1] and trying to display it in 4 different windows.
#include "cv.h"
#include "highgui.h"
#include <stdlib.h>
#include <stdio.h>
int main( int argc, char** argv ) {
int i;
IplImage* img = cvLoadImage( argv[1],1);
cvMoveWindow("Example1", 100, 100);
cvNamedWindow( "Example1", 1);
for(i =0; i<=4;i++) // for loop to display the same image in 4 different windows
{
cvShowImage( "Example1", img );
}
cvWaitKey(0);
cvReleaseImage( &img );
cvDestroyWindow( "Example1" );
}
P.S. I have asked a similar question show multiple images in different window in OpenCV before which was not solved and the code was difficult to understand so I am trying this question with a simpler code.
Here you go
int i;
IplImage* img = cvLoadImage("/home/khashayar/Downloads/bug14.png", 1);
cvMoveWindow("Example1", 100, 100);
cvNamedWindow("Example1", 1);
for (i = 0; i <= 4; i++)
{
// BUG FIX: the original used `char str[5] = "test";` and then wrote the
// digit into str[4], overwriting the terminating '\0'. The window name was
// therefore not a valid C string. Reserve room for digit AND terminator.
char str[6] = "test0";
str[4] = (char)('0' + i); // "test0" .. "test4"
cvShowImage(str, img);
}
cvWaitKey(0);
cvReleaseImage(&img);
cvDestroyWindow("Example1");
for(i =0; i<=4;i++) // hmm, i<=4 will actually run 5 times ...
{
cvShowImage( "Example1", img ); // <-- same window name for all == only 1 shown
}
but, please discard the c api and use c++ !. please !
#include "opencv2/opencv.hpp"
int main( int argc, char** argv ) {
cv::Mat img = cv::imread( argv[1],1);
for( int i=0; i<4; i++ ) // for loop to display the same image in 4 different windows
{
cv::String name = cv::format("Example%d",i);
cv::namedWindow( name, 1);
cv::imshow( name, img );
}
cv::waitKey(0);
}

How to directly query the camera about image luminance/ Skip compentation in OpenCV

I am developing a program in VS 2010 using OpenCV. I want to measure the luminance of every frame that the computer's camera captures. However, the camera's software stabilizes the luminance after 2-3 frames. Eg, if i put my thumb in front of the camera the first frame's luminance is 2 (scale from 0 to 255), but then while keeping my thumb in front of the camera the luminance becomes 7 and the 20 - it is stabilized there for the next frames. So the camera tries to make too dark pictures brighter and too bright pictures darker.
How can I measure the actual luminance without the camera's interference?
My code is:
#ifdef _CH_
#pragma package <opencv>
#endif
#include "stdafx.h"
#include <highgui.h>
#include "cv.h"
#include <stdio.h>
#include <stdlib.h>
#include "..\utilities.h"
int _tmain(int argc, _TCHAR* argv[])
{
FILE *file;
IplImage *img;
IplImage* grayscale_image;
int c, i, j, Luminance = 0, Pixel_Num = 0;
int Avg_Luminance;
int width_step;
int pixel_step;
// allocate memory for an image
// capture from video device #1
CvCapture* capture;
// create a window to display the images
cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
// position the window
cvMoveWindow("mainWin", 5, 5);
while(1)
{
if(file = fopen("luminance_value.txt", "w+"))
{
// retrieve the captured frame
capture= cvCaptureFromCAM(1);
img=cvQueryFrame(capture);
grayscale_image = cvCreateImage( cvGetSize(img), 8, 1 );
cvConvertImage( img, grayscale_image );
width_step= grayscale_image->widthStep;
pixel_step= grayscale_image->widthStep/grayscale_image->width;
Pixel_Num = grayscale_image->width * grayscale_image->height;
for(i = 0; i < grayscale_image->height; i++)
{
for(j = 0; j < grayscale_image->width; j++)
{
unsigned char* point = GETPIXELPTRMACRO( grayscale_image, j, i, width_step, pixel_step);
Luminance += point[0];
}
}
Avg_Luminance = Luminance / Pixel_Num;
//Avg_Luminance = cvGetCaptureProperty(capture,CV_CAP_PROP_BRIGHTNESS);
//file = fopen("luminance_value.txt", "w+");
fprintf(file, "%d", Avg_Luminance);
fclose(file);
printf("Avg_Luminance = %d\n", Avg_Luminance);
Luminance = 0;
Pixel_Num = 0;
// show the image in the window
cvShowImage("mainWin", grayscale_image );
cvReleaseCapture(&capture);
// wait 10 ms for a key to be pressed
c=cvWaitKey(10000);
// escape key terminates program
if(c == 27)
break;
}
else
{
continue;
}
}
return 0;
}

Threading face_detection on camera with opencv

I am trying to use threads for face, nose and eye detection, because without them the camera runs very slowly. I wrote this code but cannot find the mistake in it. When I compile and run it, an exception is thrown in the task-collection code at pChore->m_pFunction(pChore);.
#include <iostream>
#include "cv.h"
#include "highgui.h"
#include <pthread.h>
#include <stdio.h>
// Bundle of everything the detection thread needs: the frame to scan, the
// Haar cascade to run, and the storage cvHaarDetectObjects() writes into.
struct parameter_t{
IplImage* capturedImg; // frame from the capture (owned by the capture/driver)
CvHaarClassifierCascade* pCascade_face; // cascade loaded once in main()
CvMemStorage* storage; // detection result storage, created per frame in main()
};
void* threadface_func(void* parameter){
CvSeq * detectRect_face;
parameter_t *p =(parameter_t*)parameter;
detectRect_face=cvHaarDetectObjects(p->capturedImg,p->pCascade_face,p->storage,1.15, 3, 0,cvSize(50,50));
for(int i=0;i<(detectRect_face ? detectRect_face->total:0); i++ )
{
CvRect* r = (CvRect*)cvGetSeqElem(detectRect_face, i);
CvPoint pt1 = { r->x, r->y };
CvPoint pt2 = { r->x + r->width, r->y + r->height };
cvRectangle(p->capturedImg, pt1, pt2, CV_RGB(255,0,0), 1,8, 0);
}
return 0;
}
int main ()
{
CvCapture* capture = cvCaptureFromCAM(0);
IplImage* capturedImg;
int resCount = 1;
int flags = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH;
CvHaarClassifierCascade * pCascade_face;
pthread_t threadface;
pCascade_face = (CvHaarClassifierCascade *)cvLoad("C:/Users/Furkan/Desktop/Computer Vision/Programlar/opencv/data/haarcascades/haarcascade_frontalface_alt.xml");
cvNamedWindow("FaceDetection");
while (true)
{
CvMemStorage * storage = 0;
capturedImg = cvQueryFrame(capture);
storage = cvCreateMemStorage(0);
parameter_t my_parameters;
my_parameters.capturedImg=capturedImg;
my_parameters.storage=storage;
my_parameters.pCascade_face=pCascade_face;
int k=pthread_create(&threadface,0,threadface_func,(void*)&my_parameters);
if(k!=0)
{
printf("Create thread failed! error");
return 1;
}
cvShowImage("FaceDetection", capturedImg);
}
cvDestroyWindow("FaceDetection");
cvReleaseCapture(&capture);
pthread_exit(NULL);
return 0;
}
Please Help.
The IplImages you get from the capture point into video-driver memory. To use them in another thread you have to clone() them (I see that you are even trying to draw into that shared buffer).
you're generating new threads at an absurd high rate there, without ever waiting for one to finish
i can't see any lock/mutex in your code
please reconsider using multiple threads at all. at least, it won't work like this
(seems, that your opencv version & your api use could need an upgrade, too .. )

OpenCV 2.1: Runtime error

I have a program which uses OpenCV. I have a webcam and it captures color frames and I want to convert the color frames to gray-scale frames. So, I used the cvCvtColor(color_frame, gray_frame, CV_BGR2GRAY); to convert the color frames to BW frames.
Upon using this color->Grayscale conversion function, I get a runtime error as:
OpenCV Error: Null pointer (NULL array pointer is passed) in unknown function, file ..\..\..\..\ocv\opencv\src\cxcore\cxarray.cpp, line 2376
Anyone experienced this problem before? Any solutions how to fix this?
Thanks
My Program
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<sys/time.h>
#include"cv.h"
#include"highgui.h"
#include"cxtypes.h"
#define ERROR -1
#define NO_ERROR 1
int main()
{
int EXIT_STATUS = NO_ERROR;
int camera_index = 0;
CvCapture *capture = 0;
IplImage *color_frame = NULL;
IplImage *gray_frame = NULL;
int exit_key_press = 0;
capture = cvCaptureFromCAM(camera_index);
cvNamedWindow("SURF", CV_WINDOW_AUTOSIZE);
while(exit_key_press != 's')
{
/* Capture a frame */
color_frame = cvQueryFrame(capture);
if(color_frame == NULL)
{
break;
}
else
{
// When this line is enabled the runtime error occurs.
//cvCvtColor(color_frame, gray_frame, CV_BGR2GRAY);
cvShowImage("SURF", color_frame );
}
exit_key_press = cvWaitKey(1);
}
cvDestroyWindow("Output");
cvReleaseCapture(&capture);
printf("\n\n~*~The END~*~");
exit(EXIT_STATUS);
}
I see a lot of people trying to do this simple task and having trouble with it.
So I took the liberty of changing your source code into a program that would display on the screen the grayscale converted video from the webcam.
Please use this code for reference.
I compiled on my Macbook Pro with:
g++ -I/usr/include/opencv -c gray.cpp -o gray.o -m32 -arch i386
g++ gray.o -o gray -L/usr/lib -lcxcore -lcv -lhighgui -lcvaux -lml -ldl -m32 -arch i386
File: gray.cpp
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<sys/time.h>
#include"cv.h"
#include"highgui.h"
#include"cxtypes.h"
#define ERROR -1
#define NO_ERROR 1
int main()
{
int camera_index = 0;
IplImage *color_frame = NULL;
int exit_key_press = 0;
CvCapture *capture = NULL;
capture = cvCaptureFromCAM(camera_index);
if (!capture)
{
printf("!!! ERROR: cvCaptureFromCAM\n");
return -1;
}
cvNamedWindow("Grayscale video", CV_WINDOW_AUTOSIZE);
while (exit_key_press != 'q')
{
/* Capture a frame */
color_frame = cvQueryFrame(capture);
if (color_frame == NULL)
{
printf("!!! ERROR: cvQueryFrame\n");
break;
}
else
{
IplImage* gray_frame = cvCreateImage(cvSize(color_frame->width, color_frame->height), color_frame->depth, 1);
if (gray_frame == NULL)
{
printf("!!! ERROR: cvCreateImage\n");
continue;
}
cvCvtColor(color_frame, gray_frame, CV_BGR2GRAY);
cvShowImage("Grayscale video", gray_frame);
cvReleaseImage(&gray_frame);
}
exit_key_press = cvWaitKey(1);
}
cvDestroyWindow("Grayscale video");
cvReleaseCapture(&capture);
return 0;
}
Don't you have to allocate your IplImage? It is not specified by the conversion function but I believe you have to allocate a dst the same size/type as the source.

Resources