I am trying to learn about OpenCV and found a really good tutorial on YouTube. However, every time I am told to "select a video device" in Visual Studio, an R6010 error pops up and I have to keep aborting the program. Here is the source code. I am using OpenCV 2.2, Visual Studio 2010, and the camera in question is an HP TrueVision HD. Thanks!
//tracker1
#include<opencv\cvaux.h>
#include<opencv\highgui.h>
#include<opencv\cxcore.h>
#include<stdio.h>
#include<stdlib.h>
// Red-ball tracker: thresholds each webcam frame to the red colour range,
// runs a Hough circle transform on the result and draws the detected
// circles on the original frame. Press Esc to quit.
int main(int argc, char* []){
    CvSize size640x480 = cvSize(640, 480);
    CvCapture* p_capWebCam;      // webcam capture handle
    IplImage* p_imgOriginal;     // frame owned by the capture - never released here
    IplImage* p_imgProcessed;    // 1-channel binary image after thresholding
    CvMemStorage* p_strStorage;  // scratch storage for cvHoughCircles results
    CvSeq* p_seqCircles;         // sequence of detected circles
    float* p_fltXYRadius;        // per circle: [0] = X, [1] = Y, [2] = radius
    int i;                       // loop index
    char charCheckForEscKey;

    p_capWebCam = cvCaptureFromCAM(0);
    if(p_capWebCam == NULL){
        printf("error, webcam not found");
        getchar();
        return(-1);
    }

    cvNamedWindow("original", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("processed", CV_WINDOW_AUTOSIZE);
    p_imgProcessed = cvCreateImage(size640x480, IPL_DEPTH_8U, 1);

    while(1){
        p_imgOriginal = cvQueryFrame(p_capWebCam); // grab the next frame
        // BUG FIX: the original tested p_capWebCam (already known to be
        // non-NULL) instead of the frame, so a failed grab fell through to
        // cvInRangeS() with a NULL image and aborted the program (R6010).
        if(p_imgOriginal == NULL){
            printf("error, no frames were attained");
            getchar();
            break;
        }
        // Keep only the pixels whose BGR values lie in the "red" window.
        cvInRangeS(p_imgOriginal, CV_RGB(175,0,0), CV_RGB(256, 100, 100), p_imgProcessed);
        p_strStorage = cvCreateMemStorage(0);
        // Blur before the Hough transform to suppress noise circles.
        cvSmooth(p_imgProcessed, p_imgProcessed, CV_GAUSSIAN, 9, 9);
        p_seqCircles = cvHoughCircles(p_imgProcessed, p_strStorage, CV_HOUGH_GRADIENT,
                                      2,                        // accumulator resolution
                                      p_imgProcessed->height/4, // min distance between centres
                                      100, 50,                  // Canny / accumulator thresholds
                                      10, 400);                 // min / max radius
        for(i=0; i<p_seqCircles->total; i++){
            p_fltXYRadius = (float*)cvGetSeqElem(p_seqCircles, i);
            printf("ball position x = %f, y = %f, r = %f \n",
                   p_fltXYRadius[0], p_fltXYRadius[1], p_fltXYRadius[2]);
            // Filled green dot at the centre, red outline around the ball.
            cvCircle(p_imgOriginal, cvPoint(cvRound(p_fltXYRadius[0]), cvRound(p_fltXYRadius[1])), 3, CV_RGB(0, 255, 0), CV_FILLED);
            cvCircle(p_imgOriginal, cvPoint(cvRound(p_fltXYRadius[0]), cvRound(p_fltXYRadius[1])), cvRound(p_fltXYRadius[2]), CV_RGB(255, 0, 0), 3);
        }
        cvShowImage("original", p_imgOriginal);
        cvShowImage("processed", p_imgProcessed);
        cvReleaseMemStorage(&p_strStorage); // free per-frame Hough storage
        charCheckForEscKey = cvWaitKey(10);
        if(charCheckForEscKey == 27) break; // Esc pressed
    }

    cvReleaseCapture(&p_capWebCam);
    cvDestroyWindow("original");
    cvDestroyWindow("processed");
    return 0;
}
Related
I wrote a simple code that in theory should track the mouse position using global pixel coordinates based on display width/height.
The application creates a window (1x1 pixel at the top-left corner of the display) only for catching the "o" and "k" key presses that stop it, and for moving the mouse to (0, 0) when the application starts. So type "ok" in order to close the application!
When I move the mouse from left to right (or top/bottom) I always obtain a different value when I reach the edge of the screen. My screen is set at 1920x1080 but never reach 1920 when I'm on the right edge. Maybe a normalization problem but this is not the point.
The problem is that if I move the mouse to the left edge of the screen when I'm at right edge I can't reach 0 and when I go right again I don't reach the same value as before.
It seems that some events are lost and the behaviour changes if I move the mouse with higher acceleration.
This is a working code, you only have to change the variable mouseID with your based on output from ls -lh /dev/input/by-id command.
#include <stdio.h>
#include <string.h>
#include "/usr/include/linux/input.h"
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>
#include "SDL.h"
/* Per-device state: the polled evdev file descriptor, a raw read buffer,
 * the last decoded input_event fields (type/code/value) and the absolute
 * cursor position this program reconstructs from relative motion events. */
typedef struct connectedMouseInfoStruct
{
/* single poll() slot for the mouse device fd */
struct pollfd fileDescriptors[1];
/* full path of the /dev/input/by-id/... device node */
char devInputStream[96];
/* raw bytes read from the device; holds many struct input_event records */
unsigned char eventsBuffer[640000];
/* fields copied out of the most recently decoded input_event */
short type;
short code;
int value;
/* reconstructed absolute cursor position (clamped to the display) */
int currX;
int currY;
/* position from the previous main-loop iteration, used for deltas */
int prevX;
int prevY;
}connectedMouseInfo;
int main(int argc, char **argv)
{
char *mouseID = "usb-PixArt_Microsoft_USB_Optical_Mouse-event-mouse";
int exit = 0;
int firstKey = 0;
char *inputEvent = "/dev/input/by-id/";
connectedMouseInfo connectedMouse = {0};
int dx = 0;
int dy = 0;
SDL_Event events;
const Uint8 *keyboardState = NULL;
int bytesRead = 0;
int bufferPosition;
// Start
SDL_InitSubSystem(SDL_INIT_VIDEO);
SDL_DisplayMode currentVideoMode;
SDL_GetCurrentDisplayMode(0, ¤tVideoMode);
SDL_Window* gameWindow = NULL;
gameWindow = SDL_CreateWindow(
"MouseEvents",
0,
0,
1,
1,
SDL_WINDOW_HIDDEN);
strcat(connectedMouse.devInputStream, inputEvent);
strcat(connectedMouse.devInputStream, mouseID);
connectedMouse.fileDescriptors[0].fd = open(connectedMouse.devInputStream, O_RDONLY | O_NONBLOCK);
connectedMouse.fileDescriptors[0].events = POLLIN;
SDL_WarpMouseGlobal(0, 0);
while (!exit)
{
while (SDL_PollEvent(&events))
{
keyboardState = SDL_GetKeyboardState(NULL);
if (events.type == SDL_KEYDOWN)
{
if (events.key.keysym.scancode == SDL_SCANCODE_O)
{
firstKey = 1;
}
}
if (firstKey && events.key.keysym.scancode == SDL_SCANCODE_K)
{
exit = 1;
}
}
poll(&connectedMouse.fileDescriptors[0], 1, 0);
if (connectedMouse.fileDescriptors[0].revents == POLLIN)
{
bytesRead = 0;
bytesRead = read(connectedMouse.fileDescriptors[0].fd, connectedMouse.eventsBuffer, 640000);
if (bytesRead == -1)
printf("Read error!!!\n");
else if ((bytesRead % sizeof(struct input_event)) != 0)
printf("Incomplete packet!!!\n");
else
{
printf("Read Bytes: %d\n", bytesRead);
for (bufferPosition = 0; bufferPosition < bytesRead; bufferPosition += sizeof(struct input_event))
{
memcpy(&connectedMouse.type, &connectedMouse.eventsBuffer[bufferPosition + sizeof(struct input_event) - 8], 2);
memcpy(&connectedMouse.code, &connectedMouse.eventsBuffer[bufferPosition + sizeof(struct input_event) - 6], 2);
memcpy(&connectedMouse.value, &connectedMouse.eventsBuffer[bufferPosition + sizeof(struct input_event) - 4], 4);
if (connectedMouse.type == EV_REL)
{
if (connectedMouse.code == REL_X)
{
connectedMouse.currX += connectedMouse.value;
}
else if (connectedMouse.code == REL_Y)
{
connectedMouse.currY += connectedMouse.value;
}
}
}
}
}
if (connectedMouse.currX > currentVideoMode.w - 1)
connectedMouse.currX = currentVideoMode.w - 1;
else if (connectedMouse.currX < 0)
connectedMouse.currX = 0;
if (connectedMouse.currY > currentVideoMode.h - 1)
connectedMouse.currY = currentVideoMode.h - 1;
else if (connectedMouse.currY < 0)
connectedMouse.currY = 0;
dx = connectedMouse.currX - connectedMouse.prevX;
dy = connectedMouse.currY - connectedMouse.prevY;
if (dx != 0 || dy != 0)
{
printf("Display X: %d\n", connectedMouse.currX);
printf("Display Y: %d\n", connectedMouse.currY);
printf("Delta X: %d\n", dx);
printf("Delta Y: %d\n", dy);
printf("\n");
}
connectedMouse.prevX = connectedMouse.currX;
connectedMouse.prevY = connectedMouse.currY;
}
return 0;
}
I don't think you are actually losing mouse events. The position of the cursor on the screen just doesn't match the position you keep track of because of mouse acceleration by the X server. If you turn that off, you will notice the mouse moves very slowly, and the values in your code will increase rapidly. So much so, that you will only cover a portion of the screen - but consistently. This is because the coordinate space is larger than your screen resolution.
Furthermore, you should not adjust the value on receiving an EV_SYN event. See the documentation.
I found a solution: if we set a flat profile for pointer acceleration the coordinates shown by the code exactly match the display width/height. I tried to move the pointer over a well defined pixel on my Desktop background and I always receive the same value.
So the problem was the mouse acceleration profile
I am developing a program in VS 2010 using OpenCV. I want to measure the luminance of every frame that the computer's camera captures. However, the camera's software stabilizes the luminance after 2-3 frames. E.g., if I put my thumb in front of the camera, the first frame's luminance is 2 (on a scale from 0 to 255), but then, while keeping my thumb in front of the camera, the luminance becomes 7 and then 20 — it stabilizes there for the following frames. So the camera tries to make too-dark pictures brighter and too-bright pictures darker.
How can i measure the actual luminance without the camera's interference?
My code is:
#ifdef _CH_
#pragma package <opencv>
#endif
#include "stdafx.h"
#include <highgui.h>
#include "cv.h"
#include <stdio.h>
#include <stdlib.h>
#include "..\utilities.h"
int _tmain(int argc, _TCHAR* argv[])
{
FILE *file;
IplImage *img;
IplImage* grayscale_image;
int c, i, j, Luminance = 0, Pixel_Num = 0;
int Avg_Luminance;
int width_step;
int pixel_step;
// allocate memory for an image
// capture from video device #1
CvCapture* capture;
// create a window to display the images
cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
// position the window
cvMoveWindow("mainWin", 5, 5);
while(1)
{
if(file = fopen("luminance_value.txt", "w+"))
{
// retrieve the captured frame
capture= cvCaptureFromCAM(1);
img=cvQueryFrame(capture);
grayscale_image = cvCreateImage( cvGetSize(img), 8, 1 );
cvConvertImage( img, grayscale_image );
width_step= grayscale_image->widthStep;
pixel_step= grayscale_image->widthStep/grayscale_image->width;
Pixel_Num = grayscale_image->width * grayscale_image->height;
for(i = 0; i < grayscale_image->height; i++)
{
for(j = 0; j < grayscale_image->width; j++)
{
unsigned char* point = GETPIXELPTRMACRO( grayscale_image, j, i, width_step, pixel_step);
Luminance += point[0];
}
}
Avg_Luminance = Luminance / Pixel_Num;
//Avg_Luminance = cvGetCaptureProperty(capture,CV_CAP_PROP_BRIGHTNESS);
//file = fopen("luminance_value.txt", "w+");
fprintf(file, "%d", Avg_Luminance);
fclose(file);
printf("Avg_Luminance = %d\n", Avg_Luminance);
Luminance = 0;
Pixel_Num = 0;
// show the image in the window
cvShowImage("mainWin", grayscale_image );
cvReleaseCapture(&capture);
// wait 10 ms for a key to be pressed
c=cvWaitKey(10000);
// escape key terminates program
if(c == 27)
break;
}
else
{
continue;
}
}
return 0;
}
I have a code which basically detects playing cards, isolates them from a dynamic background based on HSV settings, then uses contours to detect the 4 points of the card to find the exact x and y position of the card. From there, the ROI is set and I can perform further processing to detect the face value of the card.
However, the code seems to be breaking and I can't seem to find the root cause of it.
I have cleared images & memory storages, I've ensured that all the Iplimages have the same formatting and resolution.
// Build a binary mask of the pixels whose HSV values fall inside the
// card-colour window (thresholds tuned for morning lighting). The caller
// owns the returned single-channel image.
IplImage* GetThresholdedImage(IplImage* imgHSV){
    IplImage* mask = cvCreateImage(cvGetSize(imgHSV), IPL_DEPTH_8U, 1);
    // In-range pixels become 255, everything else 0.
    cvInRangeS(imgHSV, cvScalar(95,67,170), cvScalar(110,119,254), mask); //Morning
    return mask;
}
// Build a binary mask of the "red" pixels of a BGR image (high red channel,
// low blue/green). The caller owns the returned single-channel image.
IplImage* RedCheck(IplImage* imgBGR){
    IplImage* redMask = cvCreateImage(cvGetSize(imgBGR), IPL_DEPTH_8U, 1);
    // Scalars are in BGR order: blue 0-100, green 0-100, red 100-254.
    cvInRangeS(imgBGR, cvScalar(0,0,100), cvScalar(100,100,254), redMask); //BGR
    return redMask;
}
// Card detector: HSV-threshold each webcam frame, approximate contours to
// quadrilaterals, crop the card's ROI and classify it as red or black by
// counting red pixels. Press Esc to quit.
int main()
{
    CvCapture* capture = cvCaptureFromCAM(0);
    if(!capture)
    {
        printf("Capture failure\n");
        return -1;
    }

    // BUG FIX: the original allocated a 48x64 image here and immediately
    // overwrote the pointer with cvQueryFrame()'s buffer, leaking it.
    IplImage* frame = 0;

    while(true)
    {
        frame = cvQueryFrame(capture);
        if(!frame) break;
        // Work on a private copy: the cvQueryFrame() buffer belongs to the
        // capture and must not be modified or released.
        frame = cvCloneImage(frame);
        cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel

        IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
        cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV
        IplImage* imgThresh = GetThresholdedImage(imgHSV);
        cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel

        CvSeq* contours;
        CvSeq* result;
        CvMemStorage *storage = cvCreateMemStorage(0);
        cvFindContours(imgThresh, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

        while(contours)
        {
            result = cvApproxPoly(contours, sizeof(CvContour),storage,CV_POLY_APPROX_DP,cvContourPerimeter(contours)*0.02,0);
            if(result->total == 4) // quadrilateral => candidate card
            {
                CvPoint *pt[4];
                for(int i=0;i<4;i++)
                {
                    pt[i] = (CvPoint*)cvGetSeqElem(result,i);
                }
                if (cvArcLength(result,CV_WHOLE_SEQ,1) >= 400) // ignore small quads
                {
                    cvLine(imgThresh,*pt[0],*pt[1],cvScalar(255,0,0),4);
                    cvLine(imgThresh,*pt[1],*pt[2],cvScalar(255,0,0),4);
                    cvLine(imgThresh,*pt[2],*pt[3],cvScalar(255,0,0),4);
                    cvLine(imgThresh,*pt[3],*pt[0],cvScalar(255,0,0),4);

                    int ROIwidth = abs((*pt[0]).x - (*pt[1]).x);
                    int ROIheight = abs((*pt[1]).y - (*pt[2]).y);
                    cvSetImageROI(frame,cvRect((*pt[1]).x,(*pt[1]).y,ROIwidth,ROIheight));
                    // With the ROI set, cvGetSize()/cvCopy() operate on the
                    // ROI only, so `temp` receives just the card.
                    IplImage* temp = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
                    cvCopy(frame,temp,0);
                    cvResetImageROI(frame);

                    cvNamedWindow( "ROI", CV_WINDOW_AUTOSIZE );
                    cvShowImage( "ROI", temp);
                    printf("Width = %d\n",ROIwidth); //255-275
                    printf("Height = %d\n",ROIheight); //140-160

                    //Card Value Detection Starts Here
                    IplImage* colorcheck = RedCheck(temp);
                    int redpixelcheck = cvCountNonZero(colorcheck);
                    if (redpixelcheck <= 15)
                    {
                        printf("Card is Black\n");
                    }
                    else
                    {
                        printf("Card is Red\n");
                    }
                    //Card Value Detection Ends Here

                    cvReleaseImage(&temp);
                    cvReleaseImage(&colorcheck);
                    // BUG FIX: the original released `frame` here and then
                    // kept using it (and released it again at the end of the
                    // loop) -- a use-after-free/double-free that crashes. It
                    // also applied `delete` to the stack variables ROIwidth,
                    // ROIheight and redpixelcheck, which is undefined
                    // behaviour; automatic variables must never be deleted.
                }
            }
            // BUG FIX: `result` lives inside `storage` (cvApproxPoly
            // allocated it there); the original `delete &result` was
            // undefined behaviour. Releasing the storage below frees every
            // sequence at once.
            contours = contours->h_next;
        }

        cvNamedWindow( "Contour", CV_WINDOW_AUTOSIZE );
        cvShowImage( "Contour", imgThresh);
        cvNamedWindow("Video",CV_WINDOW_AUTOSIZE);
        cvShowImage("Video", frame);

        //Clean up used images
        cvReleaseImage(&imgHSV);
        cvReleaseImage(&imgThresh);
        cvReleaseImage(&frame); // releases the clone made above
        cvReleaseMemStorage(&storage);

        //If 'ESC' is pressed, break the loop
        int c = cvWaitKey(10);
        if((char)c==27 ) break;
    }

    cvDestroyAllWindows() ;
    cvReleaseCapture(&capture);
    return 0;
}
The disassembly always points to the same address
770915DE add esp,4
I'm trying to make a checkers game, and at the moment I'm doing the interface with SDL, but I'm just learning C and SDL. How can I move a surface I added to the screen? I want it as simple as possible: just remove it from X and show it at Y. How do I remove a surface to make it appear in another place on the screen? Here is my code:
#include "SDL.h"
/* Piece colours (cor) and kinds (tipo) used by inserePeca(). */
#define BRANCA 2
#define PRETA 1
#define DAMA 2
#define NORMAL 1
//The attributes of the screen
const int SCREEN_WIDTH = 640;
const int SCREEN_HEIGHT = 480;
const int SCREEN_BPP = 32;
//The surfaces that will be used
SDL_Surface *pecaPreta = NULL;       /* black regular piece */
SDL_Surface *pecaBranca = NULL;      /* white regular piece */
SDL_Surface *pecaDamaPreta = NULL;   /* black king ("dama") */
SDL_Surface *pecaDamaBranca = NULL;  /* white king ("dama") */
SDL_Surface *background = NULL;      /* board image */
SDL_Surface *screen = NULL;          /* main display surface */
/* single event record reused by the main loop */
SDL_Event event;
/* Load a BMP from disk, convert it to the display's pixel format and set
 * cyan (0, 255, 255) as the transparent colour key.
 * Returns NULL when loading or conversion fails. */
SDL_Surface *load_image(char * filename )
{
    SDL_Surface *raw = SDL_LoadBMP(filename);
    SDL_Surface *converted = NULL;

    if (raw == NULL)
        return NULL;

    converted = SDL_DisplayFormat(raw);
    SDL_FreeSurface(raw); /* the unconverted copy is no longer needed */

    if (converted == NULL)
        return NULL;

    SDL_SetColorKey(converted, SDL_SRCCOLORKEY,
                    SDL_MapRGB(converted->format, 0, 0xFF, 0xFF));
    return converted;
}
/* Blit the whole of `source` onto `destination` with its top-left corner
 * placed at (x, y). */
void apply_surface( int x, int y, SDL_Surface* source, SDL_Surface* destination )
{
    SDL_Rect dst;

    dst.x = x;
    dst.y = y;
    /* NULL source rect means "copy the entire surface". */
    SDL_BlitSurface( source, NULL, destination, &dst );
}
void inserePeca(int tipo, int posX, int posY, int cor)
{
switch(cor)
{
case 1:
switch (tipo)
{
case 1:
apply_surface(posX, posY, pecaPreta, screen);
break;
case 2:
apply_surface(posX, posY, pecaDamaPreta, screen);
break;
}
break;
case 2:
switch (tipo)
{
case 1:
apply_surface(posX, posY, pecaBranca, screen);
break;
case 2:
apply_surface(posX, posY, pecaDamaBranca, screen);
break;
}
break;
}
}
/* Set up the board window, draw the background plus two sample pieces, then
 * run the event loop until the window is closed. Returns 1 on any SDL
 * initialisation failure. */
int main()
{
    int quit = 0;

    if( SDL_Init( SDL_INIT_EVERYTHING ) == -1 )
    {
        return 1;
    }
    screen = SDL_SetVideoMode( SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_BPP, SDL_SWSURFACE );
    if( screen == NULL )
    {
        return 1;
    }

    //Set the window caption
    SDL_WM_SetCaption( "Jogo de Damas 0.1b", NULL );

    //Load the images
    pecaPreta = load_image( "pecapreta.bmp" );
    pecaBranca = load_image("pecabranca.bmp");
    pecaDamaPreta = load_image("pecadamapreta.bmp");
    pecaDamaBranca = load_image("pecadamabranca.bmp");
    background = load_image( "tabuleiro.bmp" );

    //Apply the background to the screen, then two sample pieces
    apply_surface( 0, 0, background, screen );
    inserePeca(DAMA, 0,0, BRANCA);
    inserePeca(NORMAL, 80,0, PRETA);

    //Update the screen
    if( SDL_Flip( screen ) == -1 )
    {
        return 1;
    }

    while( quit == 0 )
    {
        //While there's an event to handle
        while( SDL_PollEvent( &event ) )
        {
            //If the user has Xed out the window
            if( event.type == SDL_QUIT )
            {
                //Quit the program
                quit = -1;
            }
        }
        /* BUG FIX: without a short sleep this loop busy-waits and pins a
         * CPU core while doing nothing. */
        SDL_Delay( 10 );
    }

    /* BUG FIX: free every surface that was loaded -- the original leaked
     * pecaBranca, pecaDamaPreta and pecaDamaBranca. */
    SDL_FreeSurface( pecaPreta );
    SDL_FreeSurface( pecaBranca );
    SDL_FreeSurface( pecaDamaPreta );
    SDL_FreeSurface( pecaDamaBranca );
    SDL_FreeSurface( background );

    //Quit SDL
    SDL_Quit();
    return 0;
}
as you can see I add a block on "inserePeca", I want to move it after I create it
The buffer for the screen doesn't keep all the things you draw on it as separate items -- it just holds the end result of all the drawing operations. So, you can't just draw the background, then draw a piece on it, then move the piece around -- you need to redraw the affected parts of the screen with the required changes.
You still have the images of the pieces, and you still have the background image; the way to move a piece you've drawn is simply to restore the background to the old position by blitting it again, and then blit the piece in the new position. Rather than drawing the whole screen and all the pieces over again, though, you can just draw the changed areas: blit just a part of the background to erase the old square, and then blit the piece onto the new square.
The following function is similar to your apply_surface() function, but instead of copying the whole source image to the given coordinates of the destination, it copies a region of a given width and height from the given coordinates of the source image to the same coordinates of the destination. This can then be used to restore the background for a small part of the screen.
/*
 * Copy the w-by-h region whose top-left corner is (x, y) from src onto the
 * identical region of dest. Because the source and destination rectangles
 * coincide, this "erases" whatever was drawn there by restoring the
 * background underneath it.
 */
void erase_rect( int x, int y, int w, int h, SDL_Surface *src, SDL_Surface *dest)
{
    SDL_Rect region;

    region.x = x;
    region.y = y;
    region.w = w;
    region.h = h;

    /* Same rect for both surfaces: the pixels are copied "in place". */
    SDL_BlitSurface( src, &region, dest, &region );
}
So if your squares are 50x50, and you need to move a piece from a square at (120, 40) to the square at (170, 90), you could do something like the following:
/* erase old 50x50 square at (120,40) (to background image) */
erase_rect( 120, 40, 50, 50, background, screen );
/* draw piece at new position of (170,90) */
inserePeca(NORMAL, 170, 90, PRETA);
I'm using LSD to detect straight lines in an image. The code that I downloaded contains a minimal example of calling LSD, but it's static (i.e., it outputs only the value in the main function). I want to apply the code to a video; below is the minimal example that outputs static results.
#include <stdio.h>
#include "lsd.h"
/* Minimal LSD example: build a synthetic 512x512 image (left half black,
 * right half grey), run LSD on it and print every detected segment. */
int main(void)
{
    image_double image;
    ntuple_list out;
    unsigned int x,y,i,j;
    unsigned int X = 512; /* x image size */
    unsigned int Y = 512; /* y image size */

    /* create a simple image: left half black, right half gray */
    image = new_image_double(X,Y);
    for(x=0;x<X;x++)
        for(y=0;y<Y;y++)
            image->data[ x + y * image->xsize ] = x<X/2 ? 0.0 : 64.0; /* image(x,y) */

    /* BUG FIX: backslashes in a C string literal must be escaped -- "\D",
     * "\E", "\M", etc. are invalid escape sequences, so the original
     * literal never named the intended file. */
    IplImage* imgInTmp = cvLoadImage("C:\\Documents and Settings\\Eslam farag\\My Documents\\Visual Studio 2008\\Projects\\line\\hand.JPEG", 0);

    /* call LSD */
    out = lsd(image);

    /* print output: one line per segment, out->dim values each */
    printf("%u line segments found:\n",out->size);
    for(i=0;i<out->size;i++)
    {
        for(j=0;j<out->dim;j++)
            printf("%f ",out->values[ i * out->dim + j ]);
        printf("\n");
    }

    /* free memory */
    /* BUG FIX: the loaded image was leaked; it is never used by the LSD
     * call above, so release it here. */
    if (imgInTmp)
        cvReleaseImage(&imgInTmp);
    free_image_double(image);
    free_ntuple_list(out);
    return 0;
}
If anyone can help me apply the code to video, I will be pleased. Thanks!
best regards,
Since I couldn't find a complete example, I'm sharing a code I wrote that uses OpenCV to load a video file from the disk and perform some image processing on it.
The application takes a filename as input (on the cmd line) and converts each frame of the video to it's grayscale equivalent using OpenCV built-in function cvCvtColor() to do this.
I added some comments on the code to help you understand the basic tasks.
read_video.cpp:
#include <stdio.h>
#include <highgui.h>
#include <cv.h>
int main(int argc, char* argv[])
{
cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
CvCapture *capture = cvCaptureFromAVI(argv[1]);
if(!capture)
{
printf("!!! cvCaptureFromAVI failed (file not found?)\n");
return -1;
}
IplImage* frame;
char key = 0;
while (key != 'q') // Loop for querying video frames. Pressing Q will quit
{
frame = cvQueryFrame( capture );
if( !frame )
{
printf("!!! cvQueryFrame failed\n");
break;
}
/* Let's do a grayscale conversion just 4 fun */
// A grayscale image has only one channel, and most probably the original
// video works with 3 channels (RGB). So, for the conversion to work, we
// need to allocate an image with only 1 channel to store the result of
// this operation.
IplImage* gray_frame = 0;
gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
if (!gray_frame)
{
printf("!!! cvCreateImage failed!\n" );
return -1;
}
cvCvtColor(frame, gray_frame, CV_RGB2GRAY); // The conversion itself
// Display processed frame on window
cvShowImage("video", gray_frame);
// Release allocated resources
cvReleaseImage(&gray_frame);
key = cvWaitKey(33);
}
cvReleaseCapture(&capture);
cvDestroyWindow("video");
}
Compiled with:
g++ read_video.cpp -o read `pkg-config --cflags --libs opencv`
If you want to know how to iterate through the pixels of the frame to do your custom processing, you need to check the following answer because it shows how to do a manual grayscale conversion. There you go: OpenCV cvSet2d.....what does this do
here is example of the code using LSD with opencv
#include "lsd.h"
// Run LSD on a BGR image: convert to grey, copy the pixels into LSD's
// double-precision layout, detect segments, draw them on a black canvas
// and save the result as lines_detect.png.
void Test_LSD(IplImage* img)
{
    IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvCvtColor(img, grey, CV_BGR2GRAY);

    image_double image;
    ntuple_list out;
    unsigned int x,y,i;

    // Copy the grey pixel values into LSD's input buffer.
    image = new_image_double(img->width,img->height);
    for(x=0;x<grey->width;x++)
        for(y=0;y<grey->height;y++)
        {
            CvScalar s= cvGet2D(grey,y,x);
            image->data[ x + y * image->xsize ]= s.val[0]; /* image(x,y) */
        }

    /* call LSD */
    out = lsd(image);

    printf("%u line segments found:\n",out->size);
    // Each result record holds out->dim values; the first four are the
    // segment endpoints x1, y1, x2, y2.
    vector<Line> vec;
    for(i=0;i<out->size;i++)
    {
        Line line;
        line.x1= out->values[ i * out->dim + 0];
        line.y1= out->values[ i * out->dim + 1];
        line.x2= out->values[ i * out->dim + 2];
        line.y2= out->values[ i * out->dim + 3];
        vec.push_back(line);
    }

    IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    cvZero(black);
    draw_lines(vec,black);
    cvSaveImage("lines_detect.png",black);

    /* free memory */
    // BUG FIX: `grey` and `black` were leaked on every call.
    cvReleaseImage(&grey);
    cvReleaseImage(&black);
    free_image_double(image);
    free_ntuple_list(out);
}
or this way
// Run the LSD line-segment detector on `img`, append every detected
// segment to vec_lines and return a new 1-channel image with the segments
// drawn in white. NOTE(review): the grey conversion is commented out, so
// this presumably expects a single-channel input -- confirm with callers.
// The caller appears to own the returned image -- TODO confirm it is
// released downstream.
IplImage* get_lines(IplImage* img,vector<Line>& vec_lines)
{
//to grey
//IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
//cvCvtColor(img, grey, CV_BGR2GRAY);
image_double image;
ntuple_list out;
unsigned int x,y,i,j;
// Copy the pixel values into LSD's double-precision input buffer.
image = new_image_double(img->width,img->height);
for(x=0;x</*grey*/img->width;x++)
for(y=0;y</*grey*/img->height;y++)
{
CvScalar s= cvGet2D(/*grey*/img,y,x);
double pix= s.val[0];
image->data[ x + y * image->xsize ]= pix;
}
/* call LSD */
out = lsd(image);
//out= lsd_scale(image,1);
/* print output */
//printf("%u line segments found:\n",out->size);
//vector<Line> vec;
// Each result record holds out->dim values; the first four are the
// segment endpoints x1, y1, x2, y2.
for(i=0;i<out->size;i++)
{
//for(j=0;j<out->dim;j++)
{
//printf("%f ",out->values[ i * out->dim + j ]);
Line line;
line.x1= out->values[ i * out->dim + 0];
line.y1= out->values[ i * out->dim + 1];
line.x2= out->values[ i * out->dim + 2];
line.y2= out->values[ i * out->dim + 3];
/*vec*/vec_lines.push_back(line);
}
//printf("\n");
}
// Draw every collected segment anti-aliased in white on a black canvas.
IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
cvZero(black);
for(int i=0;i<vec_lines.size();++i)
{
//if(vec[i].x1==vec[i].x2||vec[i].y1==vec[i].y2)
cvLine(black,cvPoint(vec_lines[i].x1,vec_lines[i].y1),cvPoint(vec_lines[i].x2,vec_lines[i].y2),CV_RGB(255,255,255),1, CV_AA);
}
/*cvNamedWindow("img", 0);
cvShowImage("img", img);*/
//cvSaveImage("lines_detect.png",black/*img*/);
/* free memory */
//cvReleaseImage(&grey);
free_image_double(image);
free_ntuple_list(out);
return black;
}