OpenCV corner detection for high-resolution images - C

I've just implemented corner detection using OpenCV, but the corner count comes back as zero. I am using 3488x2616 camera images. Is there a proper way to do corner detection on high-resolution images? I don't know where my mistake is. I've attached my code and images below. Please help me; I'm very new to OpenCV. Sorry for my English.
int board_w = 6;
int board_h = 6;
const int MAX_CORNERS = 500;

int main()
{
    // A 6x6 board has 5x5 inner corners.
    int board_n = (board_w - 1) * (board_h - 1);
    CvSize board_sz = cvSize(board_w - 1, board_h - 1);
    CvPoint2D32f* corners = new CvPoint2D32f[board_n];
    int cornerCount = 0;

    IplImage *image = cvLoadImage("myimage.jpg");
    IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);
    cvCvtColor(image, gray_image, CV_BGR2GRAY);

    const int N = cvFindChessboardCorners(gray_image, board_sz, &corners[0], &cornerCount, 10);
    cvFindCornerSubPix(gray_image, &corners[0], cornerCount, cvSize(3, 3), cvSize(-1, -1),
                       cvTermCriteria(CV_TERMCRIT_EPS, 0, .01));

    printf("\nthe count was: %d\n", cornerCount);
    for (int i = 0; i < cornerCount; i++)
    {
        cvCircle(image, cvPointFrom32f(corners[i]), 3, CV_RGB(0, 0, 255), 2);
    }

    cvNamedWindow("firstframe");
    cvShowImage("firstframe", image);
    cvWaitKey(0);
    cvReleaseImage(&image);
    cvReleaseImage(&gray_image);
}
The code works for normal chessboard images, but it does not work with real-time camera images at that resolution. Please help me. Thanks in advance.

If you suspect that detection is really failing simply because you are using high resolution images, you can always cvResize() your IplImage to a smaller size and verify your observation.
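A minimal sketch of that check, assuming the same gray_image and board_sz variables as in the question's code (the 1/4 scale factor and the flag combination are arbitrary examples, not part of the original answer):

/* Sketch only: shrink the grayscale image to a quarter of its size and rerun
   the detector to see whether the resolution is the culprit. */
IplImage *small_gray = cvCreateImage(cvSize(gray_image->width / 4,
                                            gray_image->height / 4),
                                     gray_image->depth, 1);
cvResize(gray_image, small_gray, CV_INTER_AREA);

CvPoint2D32f small_corners[64];
int small_count = 0;
int found = cvFindChessboardCorners(small_gray, board_sz, small_corners, &small_count,
                                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
printf("found=%d, corners=%d on the downscaled image\n", found, small_count);

cvReleaseImage(&small_gray);

If the detector succeeds on the downscaled copy, the original resolution (or the detector's behaviour at that resolution) is the likely problem.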

Related

Difficulties getting a pure-sounding sine wave (currently getting a subtle secondary frequency with it)

I'm attempting to play a pure sine wave in SDL2, but I can't seem to get a completely pure tone. It sounds pretty close to a true sine wave, but there is a slight secondary frequency behind it that sounds closer to a square wave. I've recorded the sound and verified that it is indeed incorrect (compared to a pure sine wave) when played through two sets of speakers on two different systems.
I've tried quite a few things at this point, including implementing several sine-wave examples from Stack Overflow and attempting to adapt the code from Handmade Hero, but each time the same problem crops up. My suspicion is that there's something wrong with the bit precision, an incorrect cast somewhere, or that it has something to do with the specific way SDL audio works that I'm not navigating properly.
Here's the main audio callback function that I'm currently working with along with my most recent attempt at writing a sine wave to the buffer:
#define Pi32 3.14159265359f
#define Tau32 (2.0f * Pi32)

void
AudioCallback(void* UserData, u8* Stream, int Length)
{
    audio_user_data* AudioUserData = (audio_user_data*)UserData;
    static u32 Count = 0;
    u16* SampleBuffer = (u16*)Stream;
    int SamplesToWrite = Length / AudioUserData->BytesPerSample;
    for(int SampleIndex = 0; SampleIndex < SamplesToWrite; SampleIndex++)
    {
        u16 ToneValue = round((AudioUserData->ToneVolume * sin(Tau32 * (f32)Count / (f32)AudioUserData->WavePeriod)));
        *SampleBuffer++ = ToneValue;
        *SampleBuffer++ = ToneValue;
        ++Count;
    }
}
I would be happy to provide more context if it might help
EDIT -- Additional Context:
#include "x:\SDL2-2.0.10\include\SDL.h"

#define Pi32 3.14159265359f
#define Tau32 (2.0f * Pi32)

#define INITIAL_SCREEN_WIDTH (8*80)
#define INITIAL_SCREEN_HEIGHT (8*60)

typedef struct audio_user_data audio_user_data;
struct audio_user_data
{
    int SamplesPerSecond;
    int BytesPerSample;
    int SampleIndex;
    int ToneHz;
    int ToneVolume;
    int WavePeriod;
    u32 FileLength;
    u16* BufferLocation;
};

void
AudioCallback(void* UserData, u8* Stream, int Length)
{
    audio_user_data* AudioUserData = (audio_user_data*)UserData;
    static u32 Count = 0;
    u16* SampleBuffer = (u16*)Stream;
    int SamplesToWrite = Length / AudioUserData->BytesPerSample;
    for(int SampleIndex = 0; SampleIndex < SamplesToWrite; SampleIndex++)
    {
        u16 ToneValue = (0.5f + (AudioUserData->ToneVolume * sin(Tau32 * Count / AudioUserData->WavePeriod)));
        *SampleBuffer++ = ToneValue;
        *SampleBuffer++ = ToneValue;
        ++Count;
    }
}

int
main(int argc, char* argv[])
{
    SDL_Init(SDL_INIT_VIDEO|SDL_INIT_AUDIO);
    SDL_Window* Window = SDL_CreateWindow("Spell Checker", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, INITIAL_SCREEN_WIDTH*2, INITIAL_SCREEN_HEIGHT*2, 0);
    SDL_Renderer* Renderer = SDL_CreateRenderer(Window, 0, SDL_RENDERER_SOFTWARE);
    SDL_PixelFormat* Format = SDL_AllocFormat(SDL_PIXELFORMAT_RGB888);
    SDL_Texture* Screen = SDL_CreateTexture(Renderer, Format->format, SDL_TEXTUREACCESS_STREAMING, INITIAL_SCREEN_WIDTH, INITIAL_SCREEN_HEIGHT);

    audio_user_data AudioUserData = {0};
    AudioUserData.SamplesPerSecond = 44100;
    AudioUserData.BytesPerSample = 2 * sizeof(int16);
    AudioUserData.SampleIndex = 0;
    AudioUserData.ToneVolume = 3000;
    AudioUserData.ToneHz = 440;
    AudioUserData.WavePeriod = AudioUserData.SamplesPerSecond / AudioUserData.ToneHz;

    SDL_AudioSpec Want, Have;
    SDL_AudioDeviceID AudioDeviceID;
    Want.freq = AudioUserData.SamplesPerSecond;
    Want.format = AUDIO_S16;
    Want.channels = 2;
    Want.samples = 4096;
    Want.callback = &AudioCallback;
    Want.userdata = &AudioUserData;
    AudioDeviceID = SDL_OpenAudioDevice(0, 0, &Want, &Have, 0);
    SDL_PauseAudioDevice(AudioDeviceID, 0); // Start playing

    u32* PixelMap = calloc(INITIAL_SCREEN_WIDTH * INITIAL_SCREEN_HEIGHT, sizeof(PixelMap));
    int PixelMapLocation = 0;
    int Running = 1;
    while(Running)
    {
        SDL_Event Event;
        while(SDL_PollEvent(&Event))
        {
            if(Event.type == SDL_QUIT)
            {
                Running = 0;
                break;
            }
        }

        // Test colors
        PixelMapLocation = 0;
        for(int Row = 0; Row < INITIAL_SCREEN_WIDTH; ++Row)
        {
            for(int Col = 0; Col < INITIAL_SCREEN_HEIGHT; ++Col)
            {
                PixelMap[PixelMapLocation++] = 0xFF00FF;
            }
        }
        for(int Row = 0; Row < INITIAL_SCREEN_WIDTH; ++Row)
        {
            for(int Col = 0; Col < INITIAL_SCREEN_HEIGHT; ++Col)
            {
                PixelMap[PixelMapLocation++] = 0x00FFFF;
            }
        }

        SDL_UpdateTexture(Screen, 0, PixelMap, INITIAL_SCREEN_WIDTH * sizeof(PixelMap));
        SDL_RenderClear(Renderer);
        SDL_RenderCopy(Renderer, Screen, 0, 0);
        SDL_RenderPresent(Renderer);
    }

    return(0);
}
EDIT2:
I recorded the audio I'm hearing here (might need to turn the volume up to hear the problem):
https://www.youtube.com/watch?v=-V2IMhK2Zis&feature=youtu.be
I also ran a timing test, and got this back for each run through of the AudioCallback function:
https://imgur.com/a/9pqCte0
EDIT3:
Oscilloscope readings --
My sine wave:
A pure sine wave:
(I can't see an appreciable difference, but maybe someone else can?) Edit: Oh wait, on the left side of the oscilloscope, there are clear differences between the two waveforms (though they don't appear in the main reading). Trying to figure out what the issue is -- at this point I am still unsure since I have tried several different algorithms
EDIT4:
Here's a picture of Want / Have to show that everything (other than size) is the same after calling SDL_OpenAudioDevice:
EDIT5:
Problem (sort of) solved! Setting AudioUserData.SamplesPerSecond to 48000 resulted in a pure-sounding sine wave. But the question still stands: why does it only work at 48000?
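For reference, the change described above amounts to one line in the setup code (shown here only to mirror the edit, not to explain why 44100 misbehaves):

AudioUserData.SamplesPerSecond = 48000; // was 44100; see above
AudioUserData.WavePeriod = AudioUserData.SamplesPerSecond / AudioUserData.ToneHz;
Want.freq = AudioUserData.SamplesPerSecond; // passed to SDL_OpenAudioDevice as before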

ImageMagick to write image file row by row (or band by band)

Does ImageMagick (the C API, MagickWand) have functionality to generate an image file (.jpg, .tif) from RGB raster data?
If so, can it also generate the image file band by band, meaning write a few rows at a time until the whole image is written? Or one row at a time?
I believe you are describing image row iterators. ImageMagick provides PixelIterator methods for traversing image data.
Here's a quick example.
#include <stdio.h>
#include <stdlib.h>
#include <wand/MagickWand.h>

int main(int argc, const char * argv[]) {
    // Set up IM environment.
    MagickWandGenesis();

    // Prototype
    MagickWand * wand;
    PixelWand * bg;
    PixelIterator * iter;
    PixelWand ** row;
    MagickPixelPacket pixel;
    size_t
        x,
        y,
        row_width,
        width = 200,
        height = 50;
    unsigned int
        seed = 0xABCD;

    // Allocate & initialize.
    wand = NewMagickWand();
    bg = NewPixelWand();

    // Create an empty image.
    PixelSetColor(bg, "WHITE");
    MagickNewImage(wand, width, height, bg);

    // Free color resource.
    bg = DestroyPixelWand(bg);

    srand(seed); // Seed random number.

    // Allocate & initialize pixel iterator.
    iter = NewPixelIterator(wand);

    // Loop through all rows.
    for (y = 0; y < height; ++y)
    {
        // Pull all pixels in a row.
        row = PixelGetNextIteratorRow(iter, &row_width);
        // Iterate over all pixels collected.
        for (x = 0; x < row_width; ++x)
        {
            // Copy pixel data to packet.
            PixelGetMagickColor(row[x], &pixel);
            // Set random colors.
            pixel.red = rand() & QuantumRange;
            pixel.green = rand() & QuantumRange;
            pixel.blue = rand() & QuantumRange;
            // Put data back to pixel from packet.
            PixelSetMagickColor(row[x], &pixel);
        }
        // Sync manipulated data on buffer back to image.
        PixelSyncIterator(iter);
    }

    /******************************
     Let's set row 16 to be all RED
    ******************************/
    PixelSetIteratorRow(iter, 15);
    row = PixelGetNextIteratorRow(iter, &row_width);
    for (x = 0; x < row_width; ++x)
    {
        PixelGetMagickColor(row[x], &pixel);
        pixel.red = QuantumRange;
        pixel.green = 0;
        pixel.blue = 0;
        PixelSetMagickColor(row[x], &pixel);
    }
    // Sync manipulated data on buffer back to image.
    PixelSyncIterator(iter);

    // Free iterator resource.
    iter = DestroyPixelIterator(iter);

    MagickWriteImage(wand, "/tmp/output.png");

    // Free image data.
    wand = DestroyMagickWand(wand);
    MagickWandTerminus();
    return 0;
}
Please note that the above example uses ImageMagick 6. For ImageMagick 7, please review the Porting Guide.

C# WIA 12-bit grayscale image loses quality as bitmap

I am writing a Windows Forms application. I have an image received from a CCD camera. The camera produces a 12-bit grayscale TIFF (2 bytes per pixel). To receive it I am using WIA (Windows Image Acquisition). I can make a byte array from the image data. After that I need to display the image in a PictureBox, but when shown there the image is too dark. Also, the created bitmap is Format32bppArgb. Do I lose quality when creating the bitmap, and how can I create the bitmap format I need? I am extremely new to image processing and any help will be great. I have read many forum posts on converting between the formats, but with no luck so far.
So, any ideas how to get a 16-bit grayscale bitmap from what I have so far?
Edit: This is now working:
private void btnAdvancedTestSnap_Click(object sender, EventArgs e)
{
    WIA.CommonDialog _dialog = new CommonDialogClass();
    WIA.Device _camera = _dialog.ShowSelectDevice(WIA.WiaDeviceType.CameraDeviceType, false, false);
    ImageFile imageFile = (ImageFile)_camera.Items[1].Transfer(EnvFormatID.wiaFormatTIFF);
    Byte[] receivedBytes = (byte[])imageFile.FileData.get_BinaryData();

    int bytecount = receivedBytes.Length;
    int width = imageFile.Width;
    int height = imageFile.Height;
    int dimension = width * height * 2; // number of bytes representing the image - 2 bytes per pixel
    int startImgBytes = bytecount - dimension;
    startImgBytes += 1; // from which position of the big array to start making pixel values

    byte[] imageBytes = new byte[dimension]; // byte array with only the bytes representing the picture
    int j = 0;
    for (int i = startImgBytes; i < receivedBytes.Length; i++) // filling the pixel byte data array
    {
        imageBytes[j] = receivedBytes[i];
        j++;
    }

    int pixDimension = width * height; // number of pixels in the image
    int[] pixVal = new int[pixDimension];
    int z = 0;
    for (int i = 0; i < imageBytes.Length; i += 2)
    {
        int res = (imageBytes[i] * 0x100) + imageBytes[i + 1]; // int value of the pixel, 2 bytes per pixel
        int pix = (res * 255) / 4095; // scaling down to an 8-bit value
        pixVal[z] = pix;
        z++;
    }

    Bitmap newImage = new Bitmap(width, height, PixelFormat.Format24bppRgb);
    Color p;
    // grayscale
    int counter = 0;
    for (int y = 0; y < height; y++) // height
    {
        for (int x = 0; x < width; x++) // width
        {
            int val = pixVal[counter];
            newImage.SetPixel(x, y, Color.FromArgb(255, val, val, val));
            counter++;
        }
    }
    pbAdvanced.Image = newImage; // show the image in the picture box
}
But this method is very slow for high resolutions. Any ideas how to improve the speed? I have read examples using Marshal.Copy and LockBits, but in all of them a source image is copied to a new one. Any help will be greatly appreciated.

Compare histograms of grayscale images in OpenCV

Hi, can anyone provide me with a simple OpenCV program to load two RGB images, convert them to grayscale, calculate their histograms, and then compare the histograms? I saw a similar program on the OpenCV site, but it used HSV instead of grayscale and it was a C++ program. I can look up the flow and everything... I just don't know which functions to use and what their arguments mean.
Regards,
Kiran
Here is a simple code snippet that does it. Since you have not said how you want to compare the histograms, I suggest doing it visually.
#include <opencv2/opencv.hpp>

void show_histogram(std::string const& name, cv::Mat1b const& image)
{
    // Set histogram bins count
    int bins = 256;
    int histSize[] = {bins};
    // Set ranges for histogram bins
    float lranges[] = {0, 256};
    const float* ranges[] = {lranges};
    // create matrix for histogram
    cv::Mat hist;
    int channels[] = {0};

    // create matrix for histogram visualization
    int const hist_height = 256;
    cv::Mat3b hist_image = cv::Mat3b::zeros(hist_height, bins);

    cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges, true, false);

    double max_val = 0;
    minMaxLoc(hist, 0, &max_val);

    // visualize each bin
    for(int b = 0; b < bins; b++) {
        float const binVal = hist.at<float>(b);
        int const height = cvRound(binVal*hist_height/max_val);
        cv::line
            ( hist_image
            , cv::Point(b, hist_height-height), cv::Point(b, hist_height)
            , cv::Scalar::all(255)
            );
    }
    cv::imshow(name, hist_image);
}

int main (int argc, const char* argv[])
{
    // here you can use cv::IMREAD_GRAYSCALE to load a grayscale image, see image2
    cv::Mat3b const image1 = cv::imread("C:\\workspace\\horse.png", cv::IMREAD_COLOR);
    cv::Mat1b image1_gray;
    cv::cvtColor(image1, image1_gray, cv::COLOR_BGR2GRAY);
    cv::imshow("image1", image1_gray);
    show_histogram("image1 hist", image1_gray);

    cv::Mat1b const image2 = cv::imread("C:\\workspace\\bunny.jpg", cv::IMREAD_GRAYSCALE);
    cv::imshow("image2", image2);
    show_histogram("image2 hist", image2);

    cv::waitKey();
    return 0;
}
Result:
To compare the histograms numerically, you can look at the last four points of this tutorial: http://docs.opencv.org/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.html
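As a rough sketch of that numeric comparison (assuming the same calcHist parameters used in show_histogram above; the helper name gray_histogram is mine, not from the tutorial), cv::compareHist reduces two histograms to a single score:

// Sketch only: build a 256-bin grayscale histogram the same way
// show_histogram() does, then compare two of them with cv::compareHist.
cv::Mat gray_histogram(cv::Mat1b const& image)
{
    int bins = 256;
    int histSize[] = {bins};
    float lranges[] = {0, 256};
    const float* ranges[] = {lranges};
    int channels[] = {0};
    cv::Mat hist;
    cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges, true, false);
    cv::normalize(hist, hist, 0, 1, cv::NORM_MINMAX);
    return hist;
}

// Usage, after image1_gray and image2 have been loaded as in main() above:
//   double corr = cv::compareHist(gray_histogram(image1_gray),
//                                 gray_histogram(image2),
//                                 cv::HISTCMP_CORREL); // 1.0 means identical histograms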

How to slice/cut an image into pieces

So I have a function that receives an OpenCV image and converts it to grayscale.
void UseLSD(IplImage* destination)
{
    IplImage *destinationForGS = cvCreateImage(cvSize(destination->width, destination->height), IPL_DEPTH_8U, 1);
    cvCvtColor(destination, destinationForGS, CV_RGB2GRAY);
}
Now, how do I cut that image into pieces of 10x10 pixels and iterate through them?
(The width and height may not divide evenly by 10, but losing a few pixels at the edges of the image would be OK for me.)
By the way, could you show how to output one of the 10x10 images to the screen, please?
You can crop your images into small pieces like this (iteration not tested):
// source image
IplImage *source = cvLoadImage("lena.jpg", 1);

int roiSize = 10;
for(int j = 0; j < source->width/roiSize; ++j) {
    for(int i = 0; i < source->height/roiSize; ++i) {
        // x offset comes from the column index (j), y offset from the row index (i)
        cvSetImageROI(source, cvRect(j*roiSize, i*roiSize, roiSize, roiSize));
        // cropped image
        IplImage *cropSource = cvCreateImage(cvGetSize(source), source->depth, source->nChannels);
        // copy
        cvCopy(source, cropSource, NULL);
        // ... do what you want with your cropped image ...
        // always reset the ROI
        cvResetImageROI(source);
    }
}
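To address the side question about showing one of the 10x10 pieces on screen, here is a minimal sketch that could go inside the loop above, where cropSource exists (the window name and the 10x magnification are arbitrary choices):

// Sketch only: enlarge the 10x10 crop so it is visible, then display it.
IplImage *enlarged = cvCreateImage(cvSize(roiSize * 10, roiSize * 10),
                                   cropSource->depth, cropSource->nChannels);
cvResize(cropSource, enlarged, CV_INTER_NN);
cvNamedWindow("crop", CV_WINDOW_AUTOSIZE);
cvShowImage("crop", enlarged);
cvWaitKey(0);
cvReleaseImage(&enlarged);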
I think the simplest solution is to use Regions of Interest. Here is a sample:
/* load image */
IplImage *img1 = cvLoadImage("elvita.jpg", 1);

/* set the Region of Interest
   Note that the rectangle area has to be __INSIDE__ the image.
   You just iterate through x and y.
   cvRect takes (x, y, width, height), so each piece is 10x10 pixels. */
cvSetImageROI(img1, cvRect(x*10, y*10, 10, 10));

/* create destination image
   Note that cvGetSize will return the width and the height of the ROI */
IplImage *img2 = cvCreateImage(cvGetSize(img1),
                               img1->depth,
                               img1->nChannels);

/* copy subimage */
cvCopy(img1, img2, NULL);

/* always reset the Region of Interest */
cvResetImageROI(img1);
