Transparency in sprite texture not working (C)

I'm trying to draw a sprite in my 3D scene using OpenGL. The problem is that when the sprite appears in the scene on top of the 3D objects, there's a black box around it where the background should be transparent.
I created my sprite sheet with GIMP as a 32-bit BMP file (A8 B8 G8 R8). I also used GIMP to set the alpha value of every pixel that should be transparent to 0. I load my BMP file into my program as follows:
unsigned int LoadSpriteTexBMP(const char* file)
{
   unsigned int   texture;    // Texture name
   FILE*          f;          // File pointer
   unsigned short magic;      // Image magic
   unsigned int   dx,dy,size; // Image dimensions
   unsigned short nbp,bpp;    // Planes and bits per pixel
   unsigned char* image;      // Image data
   unsigned int   k;          // Counter
   int max;                   // Maximum texture dimensions

   // Open file
   f = fopen(file,"rb");
   // Check image magic
   if (fread(&magic,2,1,f)!=1)
      Fatal("Cannot read magic from %s\n",file);
   if (magic!=0x4D42 && magic!=0x424D)
      Fatal("Image magic not BMP in %s\n",file);
   // Seek to and read header
   if (fseek(f,16,SEEK_CUR) || fread(&dx ,4,1,f)!=1 || fread(&dy ,4,1,f)!=1 ||
       fread(&nbp,2,1,f)!=1 || fread(&bpp,2,1,f)!=1 || fread(&k,4,1,f)!=1)
      Fatal("Cannot read header from %s\n",file);
   // Reverse bytes on big endian hardware (detected by backwards magic)
   if (magic==0x424D)
   {
      Reverse(&dx,4);
      Reverse(&dy,4);
      Reverse(&nbp,2);
      Reverse(&bpp,2);
      Reverse(&k,4);
   }
   // Check image parameters
   glGetIntegerv(GL_MAX_TEXTURE_SIZE,&max);
   if (dx<1 || dx>max)
      Fatal("%s image width %d out of range 1-%d\n",file,dx,max);
   if (dy<1 || dy>max)
      Fatal("%s image height %d out of range 1-%d\n",file,dy,max);
   if (nbp!=1)
      Fatal("%s bit planes is not 1: %d\n",file,nbp);
   if (bpp!=32)
      Fatal("%s bits per pixel is not 32: %d\n",file,bpp);
   // Allocate image memory
   size = 4*dx*dy;
   image = (unsigned char*) malloc(size);
   if (!image)
      Fatal("Cannot allocate %d bytes of memory for image %s\n",size,file);
   // Seek to and read image
   if (fseek(f,20,SEEK_CUR) || fread(image,size,1,f)!=1)
      Fatal("Error reading data from image %s\n",file);
   fclose(f);
   // Reverse colors (ABGR -> BGRA)
   for (k=0;k<size;k+=4)
   {
      unsigned char temp = image[k];
      image[k]   = image[k+1];
      image[k+1] = image[k+2];
      image[k+2] = image[k+3];
      image[k+3] = temp;
   }
   // Generate 2D texture
   glGenTextures(1,&texture);
   glBindTexture(GL_TEXTURE_2D,texture);
   // Copy image
   glTexImage2D(GL_TEXTURE_2D,0,3,dx,dy,0,GL_BGRA,GL_UNSIGNED_BYTE,image);
   if (glGetError())
      Fatal("Error in glTexImage2D %s %dx%d\n",file,dx,dy);
   // Scale linearly when image size doesn't match
   glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
   glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
   // Free image memory
   free(image);
   // Return texture name
   return texture;
}
To set up displaying my sprite in the scene, I use the following:
unsigned int texture = LoadSpriteTexBMP("Sprite.bmp");
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glTexEnvi(GL_TEXTURE_ENV , GL_TEXTURE_ENV_MODE , GL_MODULATE);
glBindTexture(GL_TEXTURE_2D,texture);
Any idea why I'm not getting the desired transparent effect around the sprite?

Try setting GL_TEXTURE_WRAP_S and GL_TEXTURE_WRAP_T to GL_CLAMP_TO_EDGE with glTexParameteri. It's possible that due to floating point error, your texture coordinates at the edge of your polygons are slightly less than 0 or slightly greater than 1.
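For reference, a minimal sketch of that suggestion, applied while the sprite texture is bound (these are standard OpenGL calls; whether it fixes the black border depends on the texture coordinates actually used):
glBindTexture(GL_TEXTURE_2D, texture);
// clamp so samples just outside [0,1] use the edge texels
// instead of wrapping around to the opposite side of the image
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);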

I discovered the answer. I needed to change this line:
glTexImage2D(GL_TEXTURE_2D,0,3,dx,dy,0,GL_BGRA,GL_UNSIGNED_BYTE,image);
to:
glTexImage2D(GL_TEXTURE_2D,0,4,dx,dy,0,GL_BGRA,GL_UNSIGNED_BYTE,image);
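The third argument is the texture's internal format: the legacy value 3 means three components (GL_RGB), so the alpha channel was thrown away when the texture was created, while 4 means GL_RGBA and keeps it. For reference, a modern-style equivalent of the fixed call with the explicit sized enum would be:
// GL_RGBA8 is the sized four-component internal format, so the alpha
// values painted in GIMP survive into the texture and blending works
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, dx, dy, 0,
             GL_BGRA, GL_UNSIGNED_BYTE, image);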

Related

OpenCV: using cvCreateImage() with a grayscale image fails, and resizing usually fails

I have the following code, which loads a grayscale image (1 byte per pixel, 8-bit bitmap) from a buffer and then resizes it.
int resizeBitmap(const unsigned char *inData, const size_t inDataLength, const size_t inWidth, const size_t inHeight,
                 const int bitDepth, const int noOfChannels, unsigned char **outData, size_t *outDataLength, const size_t outWidth, const size_t outHeight) {

    // create input image
    IplImage *inImage = cvCreateImage(cvSize(inWidth, inHeight), bitDepth, noOfChannels);
    cvSetData(inImage, inData, inImage->widthStep);

    // show input image
    cvNamedWindow("OpenCV Input Image", CV_WINDOW_FREERATIO);
    cvShowImage("OpenCV Input Image", inImage);
    cvWaitKey(0);
    cvDestroyWindow("OpenCV Input Image");

    // create output image
    IplImage *outImage = cvCreateImage(cvSize(outWidth, outHeight), inImage->depth, inImage->nChannels);

    // select interpolation type
    double scaleFactor = (((double) outWidth)/inWidth + ((double) outHeight)/inHeight)/2;
    int interpolation = (scaleFactor > 1.0) ? CV_INTER_LINEAR : CV_INTER_AREA;

    // resize from input image to output image
    cvResize(inImage, outImage, interpolation);

    /* // show output image
    cvNamedWindow("OpenCV Output Image", CV_WINDOW_FREERATIO);
    cvShowImage("OpenCV Output Image", outImage);
    cvWaitKey(0);
    cvDestroyWindow("OpenCV Output Image");
    */

    // get raw data from output image
    int step = 0;
    CvSize size;
    cvGetRawData(outImage, outData, &step, &size);
    *outDataLength = step*size.height;

    cvReleaseImage(&inImage);
    cvReleaseImage(&outImage);
    return 0;
}
I am using here bitDepth = 8 and noOfChannels = 1.
Loaded image is:
and the output is:
This output is not always produced, as the program usually fails with the following error:
OpenCV Error: Bad number of channels (Source image must have 1, 3 or 4 channels) in cvConvertImage, file /tmp/opencv-20160915-26910-go28a5/opencv-2.4.13/modules/highgui/src/utils.cpp, line 611
libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /tmp/opencv-20160915-26910-go28a5/opencv-2.4.13/modules/highgui/src/utils.cpp:611: error: (-15) Source image must have 1, 3 or 4 channels in function cvConvertImage
I am attaching the debugger output because there is an interesting situation: I am passing a grayscale buffer of size 528480, which equals 1 byte * 1101 * 480, but after cvCreateImage the imageSize inside is 529920 and the widthStep is 1104! Maybe this is the problem with the image, but why does it happen?
This issue is related to the widthStep and width of IplImage. OpenCV pads each row so that widthStep is a multiple of 4 bytes. Here OpenCV is using a width of 1101 and a widthStep of 1104, so when the data is written from the bitmap to the IplImage, a few extra pixels get written per row (note the diagonal line from top-left to bottom-right).
Note that the image is not tilted. It's just that every row is shifted a little to the left (by 3 pixels) relative to the previous one, which gives the impression of a shearing transformation.
It could also be possible that you are giving a smaller width than what the Bitmap holds.
See the docs here and search for padding. You can try copying the data row by row, as sketched below.
Why the crash: sometimes OpenCV will end up reading beyond the Bitmap buffer and may hit an inaccessible memory address, causing the exception.
Note: the Bitmap probably also has padding of its own, which is where the black diagonal line comes from.
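A minimal sketch of that row-by-row copy, assuming an 8-bit, single-channel source buffer with no padding of its own (the names follow the question's resizeBitmap parameters; this replaces the single cvSetData call):
IplImage *inImage = cvCreateImage(cvSize(inWidth, inHeight), IPL_DEPTH_8U, 1);
for (size_t row = 0; row < inHeight; row++) {
    /* widthStep already includes OpenCV's 4-byte row alignment,
       so copy only inWidth bytes into the start of each padded row */
    memcpy(inImage->imageData + row * inImage->widthStep,
           inData + row * inWidth,
           inWidth);
}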
Based on saurabheights' answer, I wrote a procedure that pads each bitmap row to any given multiple of bytes per row.
int padBitmap(const unsigned char *data, const size_t dataLength, const size_t width, const size_t height,
              const int bitDepth, const int noOfChannels, unsigned char **paddedData, size_t *paddedDataLength, const size_t row_multiple) {

    size_t row_length = (width*noOfChannels*bitDepth)/CHAR_BIT;
    // bytes needed to bring each row up to a multiple of row_multiple
    // (the trailing % keeps this 0 when the row is already aligned)
    size_t row_padding_size = (row_multiple - row_length % row_multiple) % row_multiple;
    size_t new_row_length = row_length + row_padding_size;
    size_t newDataLength = height * new_row_length;
    unsigned char *newData = malloc(sizeof(unsigned char) * newDataLength);
    unsigned char padding[3] = {0, 0, 0};   // enough for row_multiple up to 4

    for(size_t i=0; i<height; i++) {
        memcpy(newData + i*new_row_length, data + i*row_length, row_length);
        memcpy(newData + i*new_row_length + row_length, padding, row_padding_size);
    }
    *paddedData = newData;
    *paddedDataLength = newDataLength;
    return row_padding_size;
}
Now, before passing the bitmap to resizeBitmap(), I do this padding:
unsigned char *paddedData = 0;
size_t paddedDataLength = 0;
int padding = padBitmap(gData, gDataLength, width, height, PNG_BIT_DEPTH_8, GRAYSCALE_COMPONENTS_PER_PIXEL, &paddedData, &paddedDataLength, 4);
width += padding;
I then use paddedData as the bitmap, and it seems to work correctly.

Why is my bitmap drawing function plotting at an offset from the position I give it? (C VGA Mode 12h)

Hello, I'm working on a bitmap drawing function in C using VGA mode 12h, running the program in DOSBox. I'm getting the image on the screen, but the image starts being drawn in the middle of the screen instead of at (0,0). Can anyone tell me why I'm getting this behavior?
My plot_pixel function works fine: I'm able to draw lines and plot pixels on the screen without the weird offset I'm getting now.
This shows the problem.
Original Image:
Result:
And this is my code:
Load BMP:
/**************************************************************************
 * load_bmp                                                               *
 * Loads a bitmap file into memory.                                       *
 **************************************************************************/
void load_bmp(char *file, BITMAP *b){
    FILE *fp;
    long index;
    byte a;
    word num_colors;
    int x;
    //SetGfxMode(0x3);

    /* Opening the file */
    if((fp = fopen(file,"rb")) == NULL){
        printf("Error opening file %s.\n",file);
        exit(1);
    }

    /* Validating that the image is a valid bitmap */
    if(fgetc(fp) != 'B' || fgetc(fp) != 'M'){
        fclose(fp);
        printf("%s is not a bitmap file. \n", file);
        exit(1);
    }

    /* Height and width of the image */
    fskip(fp,16);
    fread(&b->width, sizeof(word),1 , fp);
    fskip(fp,2);
    fread(&b->height, sizeof(word),1,fp);
    fskip(fp,22);
    fread(&num_colors,sizeof(word),1,fp);
    fskip(fp,6);

    /* We are loading a 16 color image */
    if(num_colors ==0) num_colors = 16;

    /* Try to allocate memory for the bitmap data */
    if((b->data = (byte *) malloc((b->width*b->height))) == NULL)
    {
        fclose(fp);
        printf("Error allocating memory for file %s.\n",file);
        exit(1);
    }

    /* Reading palette info */
    for(index=0;index<num_colors;index++){
        b->pallete[(int)(index*3+2)] = fgetc(fp) >> 2;
        b->pallete[(int)(index*3+1)] = fgetc(fp) >> 2;
        b->pallete[(int)(index*3+0)] = fgetc(fp) >> 2;
        //fskip(fp,240);
        x = fgetc(fp);
    }

    /* Reading the bitmap */
    for(index=(b->height-1)*b->width;index>=0;index-=b->width){
        for(x=0;x<b->width;x++){
            b->data[index+x]=(byte)fgetc(fp);
        }
    }
    fclose(fp);
}
Draw bitmap:
/**************************************************************************
 * draw_transparent_bitmap                                                *
 * Draws a transparent bitmap.                                            *
 **************************************************************************/
void draw_transparent_bitmap(BITMAP *bmp,int x,int y)
{
    int i,j;
    unsigned long bitmap_offset = 0;
    byte data;

    copyMemory(double_buffer,VGA);
    printf("sum");
    getch();

    for(j=0;j<bmp->height;j++)
    {
        for(i=0;i<bmp->width;i++)
        {
            data = bmp->data[bitmap_offset];
            //if (data) double_buffer[screen_offset+x+i] = data;
            if(data) plot_pixel(x+i,y+j,data);
            bitmap_offset++;
        }
    }
}
Set palette:
void set_pallete(byte *pallete){
    int i;
    outp(PALETTE_INDEX,0);
    for(i=0;i<16*3;i++){
        outp(PALETTE_DATA,pallete[i]);
    }
}
Main:
typedef struct
{
    word width;
    word height;
    byte pallete[256*3];
    byte *data;
} BITMAP;

BITMAP fondo_inicio;

load_bmp("home16.bmp",&fondo_inicio);
set_pallete(fondo_inicio.pallete);
draw_transparent_bitmap(&fondo_inicio,0,0);
I'm not persuaded you're loading the BMP correctly. Per Wikipedia, which hopefully managed to get this right as a rare all-but-objective fact, your code, after you've checked for 'BM', assuming fskip is some sort of spin on fseek, takes these steps:
skip the 4 bytes telling you BMP size;
skip the 4 reserved bytes;
skip the 4 bytes telling you where you should load pixel data from (which you really should consume and obey);
assume you're getting a Windows 3.1 secondary header and skip the 4 bytes that tell you its length (you shouldn't);
read the lower two bytes of width;
skip the upper two bytes of width;
read the lower two bytes of height;
skip the upper two bytes of height;
skip: number of colour planes (+ 2 bytes), bits per pixel (+ 2 bytes), compression method (+ 4 = 10), image size (+ 4 = 14), horizontal density (+ 4 = 18), vertical density (+4 = 22);
read first two bytes of colour palette size;
skip next two bytes of colour palette size;
skip number of important colours;
assume the headers have then ended (but you should instead have read the header size and skipped appropriately here);
read an RGBA palette, assuming it knows the image to be 16-colour, discarding the alpha and mapping from 8 bits-per-channel to VGA-style 6 bits;
assume the image data comes straight after the palette (you shouldn't; you should have read its file offset earlier);
read one byte per pixel of image data, even though you've assumed 4 bits per pixel when reading the palette.
If reading a whole byte per pixel produces the correct image width, your BMP file likely isn't 4-bit, which means your assumptions about the header size are definitely wrong. Almost certainly what you have stored as image data is a chunk of header followed by the image. Start by not skipping the header entry that tells you where the pixel data begins: read it and use it (a minimal sketch follows below). Otherwise, if your plot_pixel automatically maps eight bits down to four, it's not a big problem to load a 256-colour image and assume that only the lowest sixteen colours are used, provided that holds true of your source imagery and storage space isn't a concern.
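A hedged sketch of that first step, assuming fskip(fp, n) simply skips n bytes forward like fseek with SEEK_CUR (the bfOffBits field at absolute offset 10 of a BMP file holds the file position of the pixel array):
unsigned long data_offset;             /* 32-bit even on 16-bit DOS compilers      */
fskip(fp, 8);                          /* after 'B','M': skip file size + reserved */
fread(&data_offset, 4, 1, fp);         /* bfOffBits: where the pixel data starts   */
/* ... read the rest of the header and the palette as before ... */
fseek(fp, data_offset, SEEK_SET);      /* then jump straight to the pixel array    */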

Finding xy position of a bitmap

I wrote a function to get the file position of a requested pixel (x250 y230, the centre of the entire x500 y460 picture). The problem is that the function returns a position that is 17 pixels too far up and 12 pixels too far to the right. What am I missing.. the padding? How can I use this function properly?
size_t find (FILE* fp, dword xp, dword yp)
{
    int i;
    int pointer = (sizeof(DIB)+sizeof(BMP)+2)+(250*3);
    for(i=0; i<460; i++)
    {
        fseek(fp, pointer+(i*pointer), SEEK_SET);
    }
    return ftell(fp);
}
As I said in my comments, you are indeed missing the padding, but not only that.
A bitmap file is composed of multiple parts: headers, a color map, and a pixel map (mainly).
From what I understand of your question, you need your function to return the offset address in the file fp (considered to be a bitmap file) of the pixel that would be at position xp ; yp. To do that you need at least three things:
The offset of the pixel map's beginning: you will find it by reading the last 4 bytes (a dword) of the bitmap file header, i.e. by reading at offset 10 in your file.
The pixel-per-row (or image width) number: you will find it in the BITMAPINFOHEADER
The bit-per-pixel number: you will find it in the BITMAPINFOHEADER
When you have this, the address of your pixel in the file is:
rowSizeInBytes = ((bitPerPixel * imageWidth + 31) / 32) * 4;
pixAddress = pixelMapStartAddress + rowSizeInBytes * yp + ((xp * bitPerPixel) / 8);
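Putting those three reads and the formula together, a hedged sketch of what find() could look like (the field offsets follow the standard BITMAPFILEHEADER/BITMAPINFOHEADER layout; note also that rows in a BMP with positive height are stored bottom-up, so yp may need to be flipped for top-down coordinates):
size_t find(FILE* fp, dword xp, dword yp)
{
    dword pixelMapStart;                 /* bfOffBits,  at file offset 10 */
    dword imageWidth;                    /* biWidth,    at file offset 18 */
    unsigned short bitPerPixel;          /* biBitCount, at file offset 28 */

    fseek(fp, 10, SEEK_SET);  fread(&pixelMapStart, 4, 1, fp);
    fseek(fp, 18, SEEK_SET);  fread(&imageWidth,    4, 1, fp);
    fseek(fp, 28, SEEK_SET);  fread(&bitPerPixel,   2, 1, fp);

    /* each row is padded up to a multiple of 4 bytes */
    dword rowSizeInBytes = ((bitPerPixel * imageWidth + 31) / 32) * 4;

    return pixelMapStart + rowSizeInBytes * yp + (xp * bitPerPixel) / 8;
}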

Advice on an algorithm for rendering a bitmap pixel by pixel

I've been working on a bitmap loader, with the main goal of doing nothing more than parsing the data properly and rendering it in OpenGL. I'm at the point where I need to draw the pixels on an x/y (i.e., pixel by pixel) basis (at least, this is what I think I need to do as far as rendering is concerned). I've already bound the texture object and called glTexImage2D(...).
Currently, what I'm having trouble with is the pixel by pixel algorithm.
As far as I understand it, bitmap (aka DIB) files store color data in what is known as the pixel array. Each row of pixels consists of a certain number of bytes, with each pixel taking either 4 bytes (32 bits per pixel), 3 (24 bpp), 2 (16 bpp), or 1 (8 bpp).
I think I need to loop through the pixels while at the same time calculating the right offset within the pixel array, which is relative to each pixel's x/y coordinate. Is this true, though? If not, what should I do? Despite following the directions given in a question I asked some time ago, I'm honestly slightly confused about whether this approach is correct.
I assume that going about it on a pixel-by-pixel basis is the right approach, mainly because
rendering a quad with glVertex* and glTexCoord* produced nothing more than a grayed-out rectangle (at the time I thought OpenGL would handle this by itself, hence attempting that in the first place).
I should also note that, while my question displays OpenGL 3.1 shaders, I moved to SDL 1.2
so I could just use immediate mode for the time being until I got the right algorithms implemented, and then switch back to modern GL.
The test image I'm parsing:
Its data output (pastebinned due to its very long length):
http://pastebin.com/6RVhAVRq
And The Code:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
    glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
    glBegin( GL_POINTS );
    {
        const unsigned width  = data->img_data->width + ( unsigned int ) center[ VEC_X ];
        const unsigned height = data->img_data->height + ( unsigned int ) center[ VEC_Y ];
        const unsigned bytecount = GetByteCount( data->img_data->bpp );
        const unsigned char* pixels = data->img_data->pixels;
        unsigned color_offset = 0;
        unsigned x_pixel;

        for ( x_pixel = center[ VEC_X ]; x_pixel < width; ++x_pixel )
        {
            unsigned y_pixel;
            for ( y_pixel = center[ VEC_Y ]; y_pixel < height; ++y_pixel )
            {
            }

            const bool do_color_update = true; //<--- replace true with a condition which checks to see if the color needs to be updated.
            if ( do_color_update )
            {
                glColor3fv( pixels + color_offset );
            }
            color_offset += bytecount;
        }
    }
    glEnd();
    glBindTexture( GL_TEXTURE_2D, 0 );
}
You're completely missing the point of an OpenGL texture in your code. The texture holds the image for you and the rasterizer does all the iterations over the pixel data for you. No need to write a slow pixel-pusher loop yourself.
As your code stands right now that texture is completely bogus and does nothing. You could completely omit the calls to glBindTexture and it'd still work – or not, because you're not actually drawing anything, you just set the glColor state. To draw something you'd have to call glVertex.
So why not leverage the pixel-pushing performance of modern GPUs and actually use a texture? How about this:
void R_RenderTexture_PixByPix( texture_bmp_t* const data, const vec3 center )
{
    if( 0 == data->texbuf_id ) {
        glGenTextures(1, &(data->texbuf_id));
        glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
        // there are a few more, but the defaults are ok
        // if you didn't change them no need for further unpack settings

        GLenum internal_format;
        GLenum format;
        GLenum type;
        switch(data->img_data->bpp) {
        case 8:
            // this could be a palette or grayscale
            internal_format = GL_LUMINANCE8;
            format = GL_LUMINANCE;
            type = GL_UNSIGNED_BYTE;
            break;

        case 15:
            internal_format = GL_RGB5;
            format = GL_BGRA; // packed 1-5-5-5 pixels need a 4-component format
            type = GL_UNSIGNED_SHORT_1_5_5_5_REV; // blue in the low bits, as in BMP
            break;

        case 16:
            internal_format = GL_RGB8;
            format = GL_RGB; // packed 5-6-5 already has red in the high bits
            type = GL_UNSIGNED_SHORT_5_6_5;
            break;

        case 24:
            internal_format = GL_RGB8;
            format = GL_BGR; // BMP files have BGR pixel order
            type = GL_UNSIGNED_BYTE;
            break;

        case 32:
            internal_format = GL_RGB8;
            format = GL_BGRA; // BMP files have BGRX pixel order
            type = GL_UNSIGNED_BYTE;
            break;
        }

        glTexImage2D( GL_TEXTURE_2D, 0, internal_format,
                      data->img_data->width, data->img_data->height, 0,
                      format, type, data->img_data->pixels );

        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    } else {
        glBindTexture( GL_TEXTURE_2D, data->texbuf_id );
    }

    static GLfloat verts[] = {
        0, 0,
        1, 0,
        1, 1,
        0, 1
    };

    // the following is to address texture image pixel centers
    // tex coordinates 0 and 1 are not on pixel centers!
    const unsigned tex_width  = data->img_data->width;
    const unsigned tex_height = data->img_data->height;
    float const s0 = 1. / (2.*tex_width);
    float const s1 = ( 2.*(tex_width-1) + 1.) / (2.*tex_width);
    float const t0 = 1. / (2.*tex_height);
    float const t1 = ( 2.*(tex_height-1) + 1.) / (2.*tex_height);

    GLfloat texcoords[] = {
        s0, t0,
        s1, t0,
        s1, t1,
        s0, t1
    };

    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glEnable(GL_TEXTURE_2D);

    glVertexPointer(2, GL_FLOAT, 0, verts);
    glTexCoordPointer(2, GL_FLOAT, 0, texcoords);

    glColor4f(1., 1., 1., 1.);
    glDrawArrays(GL_QUADS, 0, 4);

    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);

    glBindTexture( GL_TEXTURE_2D, 0 );
}
Your intuition is basically correct. The pixels are stored as an array of bytes, but the bytes are arranged into consecutive groups, with each group representing a single pixel. To address a single pixel, you'll need to do a calculation like this:
unsigned char* image_data = start_of_pixel_data;
unsigned char* pixel_addr = image_data + bytes_per_pixel * (y * width_in_pixels + x);
Be careful about the width in pixels, as sometimes there is padding at the end of the row to bring the total row width in bytes up to a multiple of 4/8/16/32/64/etc. I recommend looking at the actual bytes of the bitmap in hex first to get a sense of what is going on. It's a great learning exercise and will give you high confidence in your pixel-walking code, which is what you want. You might be able to use a debugger to do this, or else write a simple loop with printf over the image bytes.
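A small sketch of the same calculation with the row stride made explicit, since BMP rows are padded to a multiple of 4 bytes (stride is an added name; the rest follows the snippet above):
/* bytes per row, rounded up to the next multiple of 4 as BMP requires */
unsigned stride = ((width_in_pixels * bytes_per_pixel + 3) / 4) * 4;
unsigned char* pixel_addr = image_data + stride * y + bytes_per_pixel * x;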

How to apply the line segment detector (LSD) on a video, frame by frame?

int main()
{
    image_double image;
    ntuple_list out;
    unsigned int xsize,ysize,depth;
    int x,y,i,j,width,height,step;
    uchar *p;
    IplImage* img = 0;
    IplImage* dst = 0;

    img = cvLoadImage("D:\\Ahram.jpg",CV_LOAD_IMAGE_COLOR);
    width = img->width;
    height = img->height;
    dst=cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,1);
    cvCvtColor(img,dst,CV_RGB2GRAY);
    width=dst->width;
    height=dst->height;
    step=dst->widthStep;
    p=(uchar*)dst->imageData;

    image=new_image_double(dst->width,dst->height);
    xsize=dst->width;
    for(i=0;i<height;i++)
    {
        for(j=0;j<width;j++)
        {
            image->data[i+j*xsize]=p[i*step+j];
        }
    }

    /* call LSD */
    out = lsd(dst);

    /* print output */
    printf("%u line segments found:\n",out->size);
    for(i=0;i<out->size;i++)
    {
        for(j=0;j<out->dim;j++)
            printf("%f ",out->values[ i * out->dim + j ]);
        printf("\n");
    }

    /* free memory */
    free_image_double(image);
    free_ntuple_list(out);
    return 0;
}
N.B.: it compiles with no errors, but when I run it, it gives an LSD internal error: invalid image input.
Start by researching how PGM is structured:
Each PGM image consists of the following:
1. A "magic number" for identifying the file type.
A pgm image's magic number is the two characters "P5".
2. Whitespace (blanks, TABs, CRs, LFs).
3. A width, formatted as ASCII characters in decimal.
4. Whitespace.
5. A height, again in ASCII decimal.
6. Whitespace.
7. The maximum gray value (Maxval), again in ASCII decimal.
Must be less than 65536, and more than zero.
8. A single whitespace character (usually a newline).
9. A raster of Height rows, in order from top to bottom.
Each row consists of Width gray values, in order from left to right.
Each gray value is a number from 0 through Maxval, with 0 being black
and Maxval being white. Each gray value is represented in pure binary
by either 1 or 2 bytes. If the Maxval is less than 256, it is 1 byte.
Otherwise, it is 2 bytes. The most significant byte is first.
For PGM type P2 the pixels are readable (ASCII) in the file, but for P5 they won't be, because they are stored in binary format.
One important thing you should know is that this format has only 1 channel per pixel. This means PGM can only store grayscale images. Remember this!
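For illustration only (not from the original answer), a minimal P5 header for a 640x480, 8-bit grayscale image looks like the three lines below, followed immediately by 640*480 raw pixel bytes:
P5
640 480
255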
Now, if you're using OpenCV to load images from a file, you should load them using CV_LOAD_IMAGE_GRAYSCALE:
IplImage* cv_img = cvLoadImage("chairs.png", CV_LOAD_IMAGE_GRAYSCALE);
if(!cv_img)
{
    std::cout << "ERROR: cvLoadImage failed" << std::endl;
    return -1;
}
But if you use any other flag on this function or if you create an image with cvCreateImage(), or if you're capturing frames from a camera or something like that, you'll need to convert each frame to its grayscale representation using cvCvtColor().
I downloaded lsd-1.5 and noticed that there is an example there that shows how to use the library. One of the source code files, named lsd_cmd.c, manually reads a PGM file and assembles an image_double with it. The function that does this trick is read_pgm_image_double(), and it reads the pixels from a PGM file and stores them inside image->data. This is important because if the following does not work, you'll have to iterate on the pixels of IplImage and do this yourself.
After successfully loading a gray scaled image into IplImage* cv_img, you can try to create the structure you need with:
image_double image = new_image_double(cv_img->width, cv_img->height);
image->data = (double *) cv_img->imageData;
In case this doesn't work, you'll need to check the file I suggested above and iterate through the pixels of cv_img->imageData and copy them one by one (doing the proper type conversion) to image->data.
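A minimal sketch of that per-pixel copy, as an alternative to the pointer cast above, assuming the image_double layout from the LSD distribution (data indexed as x + y * xsize) and using widthStep so OpenCV's row padding is skipped:
image_double image = new_image_double(cv_img->width, cv_img->height);
for (int y = 0; y < cv_img->height; y++)
    for (int x = 0; x < cv_img->width; x++)
        image->data[x + y * image->xsize] =
            (double) (unsigned char) cv_img->imageData[x + y * cv_img->widthStep];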
At the end, don't forget to free this resource when you're done using it:
free_image_double(image);
This question helped me some time ago. You probably solved it already, so sorry for the delay, but I'm sharing the answer now.
I'm using lsd 1.6, and the lsd interface is a little different from the one you are using (the lsd function interface changed from 1.5 to 1.6).
CvCapture* capture;
capture = cvCreateCameraCapture (0);
assert( capture != NULL );

//get capture properties
int width = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
int height = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);

//create OpenCV image structs
IplImage *frame;
IplImage *frameBW = cvCreateImage( cvSize( width, height ), IPL_DEPTH_8U, 1 );

//create LSD image type
double *image;
image = (double *) malloc( width * height * sizeof(double) );

int i, j;
while (1) {
    frame = cvQueryFrame( capture );
    if( !frame ) break;

    //convert to grayscale
    cvCvtColor( frame , frameBW, CV_RGB2GRAY);

    //cast into LSD image type
    uchar *data = (uchar *)frameBW->imageData;
    for (i=0;i<width;i++){
        for(j=0;j<height;j++){
            image[ i + j * width ] = data[ i + j * width];
        }
    }

    //run LSD
    double *list;
    int n;
    list = lsd( &n, image, width, height );

    //DO PROCESSING DRAWING ETC

    //draw segments on frame
    for (j=0; j<n ; j++){
        //define segment end-points
        CvPoint pt1 = cvPoint(list[ 0 + j * 7 ],list[ 1 + j * 7 ]);
        CvPoint pt2 = cvPoint(list[ 2 + j * 7 ],list[ 3 + j * 7 ]);

        // draw line segment on frame
        cvLine(frame,pt1,pt2,CV_RGB(255,0,0),1.5,8,0);
    }

    cvShowImage("FRAME WITH LSD",frame);

    //free memory
    free( (void *) list );

    char c = cvWaitKey(1);
    if( c == 27 ) break; // ESC QUITS
}
//free memory
free( (void *) image );
cvReleaseImage( &frame );
cvReleaseImage( &frameBW );
cvDestroyWindow( "FRAME WITH LSD");
Hope this helps you or someone in the future! LSD works really great.
