How to cope with WebGL missing glBlendEquation(GL_MAX) - c

Here's my current C code that does what I'd like to do, but it relies on glBlendEquation(GL_MAX), which is unavailable in WebGL. What I want is to render a wiggly, fuzzy line. I could use a Gaussian blur, but it would have to have a VERY large radius (16 pixels) and I expect it would be REALLY slow.
Note I've removed some GL state management code and a couple of other things for clarity, but the code should work as is.
Existing C code:
static const char *pnt_vtx_shader =
    "#version 110\n"
    "varying vec2 uv;\n"
    "void main() {\n"
    "    uv = (gl_MultiTexCoord0.st - 1.0);\n"
    "    gl_Position = gl_Vertex;\n"
    "}";
static const char *pnt_shader_src =
    "#version 110\n"
    "varying vec2 uv;\n"
    "void main() {\n"
    "    gl_FragColor = vec4(exp(-4.5*0.5*log2(dot(uv,uv)+1.0)));\n"
    "}";
GLuint shader_prog;
int samp;
float pw, ph;
float sco_verts[128*8*4];
int sco_ind[128*3*6];
void init(int width, int height, int num_samp)
{
    pw = 0.5f*fmaxf(1.0f/24, 8.0f/width), ph = 0.5f*fmaxf(1.0f/24, 8.0f/height);
    samp = num_samp;
    // helper function, compiles and links the shader, prints out any errors
    shader_prog = compile_program(pnt_vtx_shader, pnt_shader_src);
    for(int i=0; i<samp; i++) {
        sco_verts[(i*8+0)*4+0] = 0; sco_verts[(i*8+0)*4+1] = 2;
        sco_verts[(i*8+1)*4+0] = 0; sco_verts[(i*8+1)*4+1] = 0;
        sco_verts[(i*8+2)*4+0] = 1; sco_verts[(i*8+2)*4+1] = 2;
        sco_verts[(i*8+3)*4+0] = 1; sco_verts[(i*8+3)*4+1] = 0;
        sco_verts[(i*8+4)*4+0] = 1; sco_verts[(i*8+4)*4+1] = 2;
        sco_verts[(i*8+5)*4+0] = 1; sco_verts[(i*8+5)*4+1] = 0;
        sco_verts[(i*8+6)*4+0] = 2; sco_verts[(i*8+6)*4+1] = 2;
        sco_verts[(i*8+7)*4+0] = 2; sco_verts[(i*8+7)*4+1] = 0;
    }
    for(int i=0; i<samp; i++) {
        sco_ind[(i*6+0)*3+0] = i*8+0; sco_ind[(i*6+0)*3+1] = i*8+1; sco_ind[(i*6+0)*3+2] = i*8+3;
        sco_ind[(i*6+1)*3+0] = i*8+0; sco_ind[(i*6+1)*3+1] = i*8+3; sco_ind[(i*6+1)*3+2] = i*8+2;
        sco_ind[(i*6+2)*3+0] = i*8+2; sco_ind[(i*6+2)*3+1] = i*8+4; sco_ind[(i*6+2)*3+2] = i*8+5;
        sco_ind[(i*6+3)*3+0] = i*8+2; sco_ind[(i*6+3)*3+1] = i*8+5; sco_ind[(i*6+3)*3+2] = i*8+3;
        sco_ind[(i*6+4)*3+0] = i*8+4; sco_ind[(i*6+4)*3+1] = i*8+6; sco_ind[(i*6+4)*3+2] = i*8+7;
        sco_ind[(i*6+5)*3+0] = i*8+4; sco_ind[(i*6+5)*3+1] = i*8+7; sco_ind[(i*6+5)*3+2] = i*8+5;
    }
}
// getsamp does some averaging over samples
static float getsamp(const float *data, int len, int i, int w) {
    float sum = 0;
    int l = IMAX(i-w, 0);
    int u = IMIN(i+w, len);
    for(int j = l; j < u; j++)
        sum += data[j];
    return sum / (2*w);
}
// R holds a rotation matrix... it's the transpose of what you would give GL though
// because of reasons :P (I wrote code that did all the stuff from this program in
// software first and the GL version shares a bunch of code with that one)
// data is audio samples, [-1, 1], the length of the array is in len
void render_scope(float R[3][3], const float *data, int len)
{
    // do the rotate/project ourselves because the GL matrix won't do the right
    // thing if we just send it our vertices; we want our tris to always be
    // parallel to the view plane, because we're actually drawing a fuzzy line,
    // not a 3D object
    // also it makes it easier to match the software implementation
    float px, py;
    {
        float s = getsamp(data, len, 0, len/96);
        s = copysignf(log2f(fabsf(s)*3+1)/2, s);
        float xt = -0.5f, yt = 0.2f*s, zt = 0.0f;
        float x = R[0][0]*xt + R[1][0]*yt + R[2][0]*zt;
        float y = R[0][1]*xt + R[1][1]*yt + R[2][1]*zt;
        float z = R[0][2]*xt + R[1][2]*yt + R[2][2]*zt;
        const float zvd = 1/(z+2);
        px = x*zvd*4/3; py = y*zvd*4/3;
    }
    for(int i=0; i<samp; i++) {
        float s = getsamp(data, len, (i+1)*len/samp, len/96);
        s = copysignf(log2f(fabsf(s)*3+1)/2, s);
        float xt = (i+1 - samp/2.0f)*(1.0f/samp), yt = 0.2f*s, zt = 0.0f;
        float x = R[0][0]*xt + R[1][0]*yt + R[2][0]*zt;
        float y = R[0][1]*xt + R[1][1]*yt + R[2][1]*zt;
        float z = R[0][2]*xt + R[1][2]*yt + R[2][2]*zt;
        const float zvd = 1/(z+2);
        x = x*zvd*4/3; y = y*zvd*4/3;
        const float dx=x-px, dy=y-py;
        const float d = 1/hypotf(dx, dy);
        const float tx=dx*d*pw, ty=dy*d*pw;
        const float nx=-dy*d*pw, ny=dx*d*ph;
        sco_verts[(i*8+0)*4+2] = px-nx-tx; sco_verts[(i*8+0)*4+3] = py-ny-ty;
        sco_verts[(i*8+1)*4+2] = px+nx-tx; sco_verts[(i*8+1)*4+3] = py+ny-ty;
        sco_verts[(i*8+2)*4+2] = px-nx;    sco_verts[(i*8+2)*4+3] = py-ny;
        sco_verts[(i*8+3)*4+2] = px+nx;    sco_verts[(i*8+3)*4+3] = py+ny;
        sco_verts[(i*8+4)*4+2] = x-nx;     sco_verts[(i*8+4)*4+3] = y-ny;
        sco_verts[(i*8+5)*4+2] = x+nx;     sco_verts[(i*8+5)*4+3] = y+ny;
        sco_verts[(i*8+6)*4+2] = x-nx+tx;  sco_verts[(i*8+6)*4+3] = y-ny+ty;
        sco_verts[(i*8+7)*4+2] = x+nx+tx;  sco_verts[(i*8+7)*4+3] = y+ny+ty;
        px = x, py = y;
    }
    glEnable(GL_BLEND);
    glBlendEquation(GL_MAX);
    glUseProgram(shader_prog);
    glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glTexCoordPointer(2, GL_FLOAT, sizeof(float)*4, sco_verts);
    glVertexPointer(2, GL_FLOAT, sizeof(float)*4, sco_verts + 2);
    glDrawElements(GL_TRIANGLES, samp*3*6, GL_UNSIGNED_INT, sco_ind);
}
Here's a screenshot from a test app. I'm not sure the line width is right in this screenshot, but it gives the idea; I'd also be using far more points, so the lines would be smoother.
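For what it's worth, WebGL isn't entirely without MAX blending: WebGL 1 exposes it through the EXT_blend_minmax extension, and WebGL 2 has it in core. Compiling this C with Emscripten against GLES2, a runtime check might look like the sketch below (the helper name is illustrative; glBlendEquation itself is core in ES2, only the GL_MAX_EXT enum comes from the extension):

#include <string.h>
#ifndef GL_MAX_EXT
#define GL_MAX_EXT 0x8008 /* enum from GL_EXT_blend_minmax */
#endif

static void set_scope_blend(void)
{
    const char *exts = (const char *)glGetString(GL_EXTENSIONS);
    if (exts && strstr(exts, "EXT_blend_minmax")) {
        glBlendEquation(GL_MAX_EXT); /* same effect as glBlendEquation(GL_MAX) */
    } else {
        /* fallback: additive blending; overlapping quads over-brighten
           slightly at the joints, but no extension is needed */
        glBlendFunc(GL_ONE, GL_ONE);
    }
}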

Related

How to use a lookAt matrix to compute rays in ray tracing?

As I understand it, the 'lookat' method is one of the simplest ways to place/rotate the camera in a scene. So I implemented the matrix described at (https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/lookat-function) in my ray tracer's code, but I have no idea how to use it to compute rays.
Basically, what I do is place the camera at negative Z, send a ray towards positive Z, and select the pixel by iterating over the X and Y of my view plane.
This is easy because the view plane is in front of the camera, and I simply assign the X and Y of my iterations to the ray destination's X and Y.
However, I would like to be able to send rays into any part of the space.
Could you please help me understand how to do that?
Thank you!
What I do basically:
{
    double deg = 50.;
    double rad = deg / (180.0 / M_PI);
    double distance = (WIDTH / 2) * (cotan(rad / 2));
    ray.orig.x = HEIGH / 2.0;
    ray.orig.y = WIDTH / 2.0;
    ray.orig.z = -distance;
    y = -1;
    while (++y <= HEIGH)
    {
        x = -1;
        while (++x <= WIDTH)
        {
            ray.dest.x = x - ray.orig.x;
            ray.dest.y = y - ray.orig.y;
            ray.dest.z = 0. - ray.orig.z;
            ray.dest = ve_normalize(&ray.dest);
            check_objects(c, &ray, 0);
            add_diffuse_light(c);
            put_pixel(c, &x, &y);
        }
    }
}
The functions to handle the lookat matrix:
t_lookat lookati(t_vector *from, t_vector *to)
{
    t_lookat lookat;
    t_vector fo;
    t_vector ri;
    t_vector up;
    t_vector tmp;

    tmp.x = 0; tmp.y = 1; tmp.z = 0;
    fo = ve_subtraction(from, to);
    fo = ve_normalize(&fo);
    ri = ve_cross(&tmp, &fo);
    ri = ve_normalize(&ri);
    up = ve_cross(&fo, &ri);
    up = ve_normalize(&up);
    lookat.ri.x = ri.x;
    lookat.ri.y = ri.y;
    lookat.ri.z = ri.z;
    lookat.up.x = up.x;
    lookat.up.y = up.y;
    lookat.up.z = up.z;
    lookat.fo.x = fo.x;
    lookat.fo.y = fo.y;
    lookat.fo.z = fo.z;
    lookat.fr.x = from->x;
    lookat.fr.y = from->y;
    lookat.fr.z = from->z;
    return (lookat);
}
t_vector orientate(t_vector *a, t_vector *from, t_vector *to)
{
    t_lookat k;
    t_vector orientate;

    k = lookati(from, to);
    orientate.x = a->x * k.ri.x + a->y * k.up.x + a->z * k.fo.x + a->x * k.fr.x;
    orientate.y = a->x * k.ri.y + a->y * k.up.y + a->z * k.fo.y + a->x * k.fr.y;
    orientate.z = a->x * k.ri.z + a->y * k.up.z + a->z * k.fo.z + a->x * k.fr.z;
    return (orientate);
}
Thank you guys, I finally solved the problem by reading this guide (https://steveharveynz.wordpress.com/2012/12/20/ray-tracer-part-two-creating-the-camera), which suggests normalizing the pixel coordinates (like the pixel range of the user "Spektre") without using a matrix.
P.S.:
typedef struct s_vector
{
    double x;
    double y;
    double z;
} t_vector;
typedef struct s_lookat
{
    t_vector ri; // right vector
    t_vector up; // up
    t_vector fo; // forward
    t_vector fr; // eye position
} t_lookat;
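For reference, here is a hedged sketch of how the lookati() basis above is typically used to build camera rays (the function name and fov parameter are illustrative, and WIDTH/HEIGH are the question's own macros): the ray origin is simply *from; only the direction is rotated through the basis, never translated by k.fr.

#include <math.h>

t_vector camera_ray_dir(t_lookat k, int x, int y, double fov_deg)
{
    double scale = tan(fov_deg * 0.5 * M_PI / 180.0);
    double aspect = (double)WIDTH / (double)HEIGH;
    /* pixel -> [-1,1] screen coordinates, y flipped so +y points up */
    double px = (2.0 * ((x + 0.5) / (double)WIDTH) - 1.0) * aspect * scale;
    double py = (1.0 - 2.0 * ((y + 0.5) / (double)HEIGH)) * scale;
    t_vector dir;

    /* the camera looks along -fo, since fo = normalize(from - to) */
    dir.x = px * k.ri.x + py * k.up.x - k.fo.x;
    dir.y = px * k.ri.y + py * k.up.y - k.fo.y;
    dir.z = px * k.ri.z + py * k.up.z - k.fo.z;
    return (ve_normalize(&dir));
}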

CUDA C - how to use Texture2D for double precision floating point

I want to use 2D texture memory for double precision. I want to read from the texture into shared memory, convert int2 to double, and then transfer the result back to host memory. But I am getting only the first row as desired; every other row's value is 2.00000000.
#include <stdio.h>
#include <cuda.h>
#define Xdim 8
#define Ydim 8
texture<int2,2> me_texture;

static __inline__ __device__ double fetch_double(int2 p){
    return __hiloint2double(p.y, p.x);
}

__global__ void kern(double *o, int pitch){
    __shared__ double A[Xdim][Ydim];
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
    int2 jj;
    if(i<Xdim && j<Ydim){
        jj = tex2D(me_texture, i, j);
        A[threadIdx.x][threadIdx.y] = fetch_double(jj);
    }
    __syncthreads();
    if(i<Xdim && j<Ydim){
        o[j*Xdim + i] = A[threadIdx.x][threadIdx.y];
    }
}
int main(int argc, char *argv[]){
    double hbuf[Xdim][Ydim];
    double hout[Xdim][Ydim];
    double *dob;
    double *dbuf;
    size_t pitch_bytes;
    cudaMallocPitch((void**)&dbuf, &pitch_bytes, sizeof(double)*Xdim, Ydim);
    cudaMallocPitch((void**)&dob, &pitch_bytes, sizeof(double)*Xdim, Ydim);
    hbuf[0][0] = 1.234567891234567;
    hbuf[0][1] = 12.34567891234567;
    hbuf[0][2] = 123.4567891234567;
    hbuf[0][3] = 1234.567891234567;
    hbuf[0][4] = 12345.67891234567;
    hbuf[0][5] = 123456.7891234567;
    hbuf[0][6] = 1234567.891234567;
    hbuf[0][7] = 12345678.91234567;
    hbuf[1][0] = 123456789.1234567;
    hbuf[1][1] = 1234567891.234567;
    hbuf[1][2] = 12345678912.34567;
    hbuf[1][3] = 123456789123.4567;
    hbuf[1][4] = 1234567891234.567;
    hbuf[1][5] = 12345678912345.67;
    hbuf[1][6] = 123456789123456.7;
    hbuf[1][7] = 1234567891234567;
    hbuf[2][0] = 123456789.7654321;
    hbuf[2][1] = 1234567897.654321;
    hbuf[2][2] = 12345678976.54321;
    hbuf[2][3] = 123456789765.4321;
    hbuf[2][4] = 1234567897654.321;
    hbuf[2][5] = 12345678976543.21;
    hbuf[2][6] = 123456789765432.1;
    hbuf[2][7] = 1234567897654321;
    hbuf[3][0] = 9.876543211234567;
    hbuf[3][1] = 98.76543211234567;
    hbuf[3][2] = 987.6543211234567;
    hbuf[3][3] = 9876.543211234567;
    hbuf[3][4] = 98765.43211234567;
    hbuf[3][5] = 987654.3211234567;
    hbuf[3][6] = 9876543.211234567;
    hbuf[3][7] = 98765432.11234567;
    hbuf[4][0] = 987654321.1234567;
    hbuf[4][1] = 9876543211.234567;
    hbuf[4][2] = 98765432112.34567;
    hbuf[4][3] = 987654321123.4567;
    hbuf[4][4] = 9876543211234.567;
    hbuf[4][5] = 98765432112345.67;
    hbuf[4][6] = 987654321123456.7;
    hbuf[4][7] = 9876543211234567;
    hbuf[5][0] = 987654321.7654321;
    hbuf[5][1] = 9876543217.654321;
    hbuf[5][2] = 98765432176.54321;
    hbuf[5][3] = 987654321765.4321;
    hbuf[5][4] = 9876543217654.321;
    hbuf[5][5] = 98765432176543.21;
    hbuf[5][6] = 987654321765432.1;
    hbuf[5][7] = 9876543217654321;
    hbuf[6][0] = 1234567891234567;
    hbuf[6][1] = 123456789123456.7;
    hbuf[6][2] = 12345678912345.67;
    hbuf[6][3] = 1234567891234.567;
    hbuf[6][4] = 123456789123.4567;
    hbuf[6][5] = 12345678912.34567;
    hbuf[6][6] = 1234567891.234567;
    hbuf[6][7] = 123456789.1234567;
    hbuf[7][0] = 12345678.91234567;
    hbuf[7][1] = 1234567.891234567;
    hbuf[7][2] = 123456.7891234567;
    hbuf[7][3] = 12345.67891234567;
    hbuf[7][4] = 1234.567891234567;
    hbuf[7][5] = 123.4567891234567;
    hbuf[7][6] = 12.34567891234567;
    hbuf[7][7] = 1.234567891234567;
    for (int i=0; i<Xdim; i++){
        for(int j=0; j<Ydim; j++){
            printf("%.16f\t", hbuf[i][j]);
        }
        printf("\n");
    }
    cudaMemcpy2D(dbuf, pitch_bytes, hbuf, Xdim*sizeof(double), Xdim*sizeof(double), Ydim, cudaMemcpyHostToDevice);
    me_texture.addressMode[0] = cudaAddressModeClamp;
    me_texture.addressMode[1] = cudaAddressModeClamp;
    me_texture.filterMode = cudaFilterModeLinear;
    me_texture.normalized = false;
    cudaBindTexture2D(0, me_texture, dbuf, cudaCreateChannelDesc(32,32,0,0, cudaChannelFormatKindSigned), Xdim, Ydim, pitch_bytes);
    int pitch = pitch_bytes/sizeof(double);
    kern<<<1, 64>>>(dob, pitch);
    cudaMemcpy2D(hout, Xdim*sizeof(double), dob, pitch_bytes, Xdim*sizeof(double), Ydim, cudaMemcpyDeviceToHost);
    printf("\nI am Fine\n");
    for(int i = 0; i < Xdim; i++){
        for(int j=0; j<Ydim; j++){
            printf("%.16f\t", hout[i][j]);
        }
        printf("\n");
    }
    cudaUnbindTexture(me_texture);
    cudaFree(dbuf);
    cudaFree(dob);
    return 0;
}
The above code works fine if you change the following things.
Replace
kern<<<1, 64>>>(..., ..)
with
dim3 blockPerGrid(1, 1);
dim3 threadPerBlock(8, 8);
kern<<<blockPerGrid, threadPerBlock>>>(....)
In the kernel, use pitch in place of Xdim:
o[j*pitch + i] = A[threadIdx.x][threadIdx.y];
And change cudaFilterModeLinear to cudaFilterModePoint.
For compilation you need to specify the compute capability; supposing your compute capability is 3.0, it would be
nvcc -arch=sm_30 file.cu
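Taken together, the host-side changes look something like this sketch (using the question's own names):

dim3 blockPerGrid(1, 1);
dim3 threadPerBlock(8, 8);                    // one 8x8 block covers the 8x8 array
me_texture.filterMode = cudaFilterModePoint;  // linear filtering is not valid for int2
kern<<<blockPerGrid, threadPerBlock>>>(dob, pitch);
// and inside kern(), index the pitched output with pitch instead of Xdim:
// o[j*pitch + i] = A[threadIdx.x][threadIdx.y];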
If your code contained error checking, you would realise that your kernel launch is failing with an invalid filter mode. It isn't legal in CUDA to use cudaFilterModeLinear with non-float types, so nothing is actually running. If you change the filter mode to cudaFilterModePoint, you might find things start working.
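That error checking can be as small as a macro around each runtime call and launch; the macro name here is illustrative, but the runtime calls are the standard CUDA API:

#include <stdio.h>
#define CUDA_CHECK(call) do { \
    cudaError_t err = (call); \
    if (err != cudaSuccess) \
        fprintf(stderr, "CUDA error '%s' at %s:%d\n", \
                cudaGetErrorString(err), __FILE__, __LINE__); \
} while (0)

// after a kernel launch:
// kern<<<blockPerGrid, threadPerBlock>>>(dob, pitch);
// CUDA_CHECK(cudaGetLastError());      // catches an invalid launch configuration
// CUDA_CHECK(cudaDeviceSynchronize()); // catches errors raised during execution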

Reverse the Fish-eye Distortion (I've used OpenCV with VC++)

I've made a simulation of fish-eye distortion.
I want to develop a reverse program that can convert the distorted image back to a normal image.
I've tried to use the undistortPoints() function but couldn't understand the input (dist_coeffs).
cv.UndistortPoints(distorted, undistorted, intrinsics, dist_coeffs)
My code for the fish-eye distortion:
#include "stdio.h"
#include <cv.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
void sampleImage(const IplImage* arr, float idx0, float idx1, CvScalar& res)
{
if(idx0<0 || idx1<0 || idx0>(cvGetSize(arr).height-1) || idx1>(cvGetSize(arr).width-1))
{
res.val[0]=0;
res.val[1]=0;
res.val[2]=0;
res.val[3]=0;
return;
}
float idx0_fl=floor(idx0);
float idx0_cl=ceil(idx0);
float idx1_fl=floor(idx1);
float idx1_cl=ceil(idx1);
CvScalar s1=cvGet2D(arr,(int)idx0_fl,(int)idx1_fl);
CvScalar s2=cvGet2D(arr,(int)idx0_fl,(int)idx1_cl);
CvScalar s3=cvGet2D(arr,(int)idx0_cl,(int)idx1_cl);
CvScalar s4=cvGet2D(arr,(int)idx0_cl,(int)idx1_fl);
float x = idx0 - idx0_fl;
float y = idx1 - idx1_fl;
res.val[0]= s1.val[0]*(1-x)*(1-y) + s2.val[0]*(1-x)*y + s3.val[0]*x*y + s4.val[0]*x*(1-y);
res.val[1]= s1.val[1]*(1-x)*(1-y) + s2.val[1]*(1-x)*y + s3.val[1]*x*y + s4.val[1]*x*(1-y);
res.val[2]= s1.val[2]*(1-x)*(1-y) + s2.val[2]*(1-x)*y + s3.val[2]*x*y + s4.val[2]*x*(1-y);
res.val[3]= s1.val[3]*(1-x)*(1-y) + s2.val[3]*(1-x)*y + s3.val[3]*x*y + s4.val[3]*x*(1-y);
}
float xscale;
float yscale;
float xshift;
float yshift;

float getRadialX(float x,float y,float cx,float cy,float k)
{
    x = (x*xscale+xshift);
    y = (y*yscale+yshift);
    float res = x+((x-cx)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
    return res;
}

float getRadialY(float x,float y,float cx,float cy,float k)
{
    x = (x*xscale+xshift);
    y = (y*yscale+yshift);
    float res = y+((y-cy)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
    return res;
}

float thresh = 1;

float calc_shift(float x1,float x2,float cx,float k)
{
    float x3 = x1+(x2-x1)*0.5;
    float res1 = x1+((x1-cx)*k*((x1-cx)*(x1-cx)));
    float res3 = x3+((x3-cx)*k*((x3-cx)*(x3-cx)));
    // std::cerr<<"x1: "<<x1<<" - "<<res1<<" x3: "<<x3<<" - "<<res3<<std::endl;
    if(res1>-thresh && res1 < thresh)
        return x1;
    if(res3<0)
    {
        return calc_shift(x3,x2,cx,k);
    }
    else
    {
        return calc_shift(x1,x3,cx,k);
    }
}
int main(int argc, char** argv)
{
    IplImage* src = cvLoadImage( "D:\\2012 Projects\\FishEye\\Debug\\images\\grid1.bmp", 1 );
    IplImage* dst = cvCreateImage(cvGetSize(src),src->depth,src->nChannels);
    IplImage* dst2 = cvCreateImage(cvGetSize(src),src->depth,src->nChannels);
    float K=0.002;
    float centerX=(float)(src->width/2);
    float centerY=(float)(src->height/2);
    int width = cvGetSize(src).width;
    int height = cvGetSize(src).height;
    xshift = calc_shift(0,centerX-1,centerX,K);
    float newcenterX = width-centerX;
    float xshift_2 = calc_shift(0,newcenterX-1,newcenterX,K);
    yshift = calc_shift(0,centerY-1,centerY,K);
    float newcenterY = height-centerY;
    float yshift_2 = calc_shift(0,newcenterY-1,newcenterY,K);
    // scale = (centerX-xshift)/centerX;
    xscale = (width-xshift-xshift_2)/width;
    yscale = (height-yshift-yshift_2)/height;
    std::cerr<<xshift<<" "<<yshift<<" "<<xscale<<" "<<yscale<<std::endl;
    std::cerr<<cvGetSize(src).height<<std::endl;
    std::cerr<<cvGetSize(src).width<<std::endl;
    for(int j=0;j<cvGetSize(dst).height;j++)
    {
        for(int i=0;i<cvGetSize(dst).width;i++)
        {
            CvScalar s;
            float x = getRadialX((float)i,(float)j,centerX,centerY,K);
            float y = getRadialY((float)i,(float)j,centerX,centerY,K);
            sampleImage(src,y,x,s);
            cvSet2D(dst,j,i,s);
        }
    }
#if 0
    cvNamedWindow( "Source1", 1 );
    cvShowImage( "Source1", dst);
    cvWaitKey(0);
#endif
    cvSaveImage("D:\\2012 Projects\\FishEye\\Debug\\images\\grid3.bmp",dst,0);
    cvNamedWindow( "Source1", 1 );
    cvShowImage( "Source1", src);
    cvWaitKey(0);
    cvNamedWindow( "Distortion", 2 );
    cvShowImage( "Distortion", dst);
    cvWaitKey(0);
#if 0
    for(int j=0;j<cvGetSize(src).height;j++)
    {
        for(int i=0;i<cvGetSize(src).width;i++)
        {
            CvScalar s;
            sampleImage(src,j+0.25,i+0.25,s);
            cvSet2D(dst,j,i,s);
        }
    }
    cvNamedWindow( "Source1", 1 );
    cvShowImage( "Source1", src);
    cvWaitKey(0);
#endif
}
Actually, my original answer was about the undistortion algorithm for individual points. If you want to undistort a complete image, there is a much simpler technique, as explained in this other thread:
Understanding of openCV undistortion
The outline of the algorithm (which is the one used in the OpenCV function undistort()) is as follows. For each pixel of the destination lens-corrected image, do the following (a short sketch follows this list):
1. Convert the pixel coordinates (u_dst, v_dst) to normalized coordinates (x', y') using the inverse of the calibration matrix K,
2. Apply your lens-distortion model to obtain the distorted normalized coordinates (x'', y''),
3. Convert (x'', y'') to distorted pixel coordinates (u_src, v_src) using the calibration matrix K,
4. Use the interpolation method of your choice to find the intensity/depth associated with the pixel coordinates (u_src, v_src) in the source image, and assign this intensity/depth to the current destination pixel (u_dst, v_dst).
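A compact sketch of those four steps, assuming a single radial coefficient k1, focal lengths (fx, fy), principal point (cx, cy), and placeholder helpers set_pixel/bilinear_sample for whatever image access and interpolation you choose:

for (int v = 0; v < height; v++) {
    for (int u = 0; u < width; u++) {
        /* 1. pixel -> normalized coordinates, via the inverse of K */
        double xp = (u - cx) / fx;
        double yp = (v - cy) / fy;
        /* 2. apply the distortion model */
        double r2 = xp*xp + yp*yp;
        double xpp = xp * (1.0 + k1*r2);
        double ypp = yp * (1.0 + k1*r2);
        /* 3. normalized -> distorted pixel coordinates, via K */
        double u_src = fx*xpp + cx;
        double v_src = fy*ypp + cy;
        /* 4. sample the source image and write the destination pixel */
        set_pixel(dst, u, v, bilinear_sample(src, u_src, v_src));
    }
}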
Original answer:
Here is the undistortion algorithm extracted from OpenCV function undistortPoints() :
void dist2norm(const cv::Point2d &pt_dist, cv::Point2d &pt_norm) const {
pt_norm.x = (pt_dist.x-Kcx)/Kfx;
pt_norm.y = (pt_dist.y-Kcy)/Kfy;
int niters=(Dk1!=0.?5:0);
double x0=pt_norm.x, y0=pt_norm.y;
for(int i=0; i<niters; ++i) {
double x2=pt_norm.x*pt_norm.x,
y2=pt_norm.y*pt_norm.y,
xy=pt_norm.x*pt_norm.y,
r2=x2+y2;
double icdist = 1./(1 + ((Dk3*r2 + Dk2)*r2 + Dk1)*r2);
double deltaX = 2*Dp1*xy + Dp2*(r2 + 2*x2);
double deltaY = Dp1*(r2 + 2*y2) + 2*Dp2*xy;
pt_norm.x = (x0-deltaX)*icdist;
pt_norm.y = (y0-deltaY)*icdist;
}
}
If you provide the coordinates of a point in the distorted image in argument pt_dist, it will calculate the normalized coordinates of the associated point and return them in pt_norm. Then, you can obtain the coordinates of the associated point in the undistorted image as
pt_undist = K . [pt_norm.x; pt_norm.y; 1]
where K is the camera matrix.
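In terms of the Kfx/Kfy/Kcx/Kcy notation used in the function above, that product reduces to (a small sketch, not part of the original answer):

pt_undist.x = Kfx * pt_norm.x + Kcx;
pt_undist.y = Kfy * pt_norm.y + Kcy;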
The standard lens distortion model used by OpenCV is explained at the beginning of this page:
where the distortion coefficients are (k1, k2, p1, p2, k3, k4, k5, k6) (most often we use k4 = k5 = k6 = 0).
I don't know what your model for fish-eye distortion is, but you can surely adapt the above algorithm to your case. Otherwise, you may use a non-linear optimization algorithm (e.g. Levenberg-Marquardt or any other) to recover the undistorted coordinates from the distorted ones.

Grainy looking sphere in my ray tracer

I am trying to write a simple ray tracer. The final image should look like this: I have read stuff about it and below is what I am doing:
create an empty image (to fill each pixel, via ray tracing)
for each pixel [for each row, each column]
    create the equation of the ray emanating from our pixel
    trace() ray:
        if ray intersects SPHERE
            compute local shading (including shadow determination)
            return color;
Now, the scene data is as follows: it sets a gray sphere of radius 1 at (0,0,-3) and a white light source at the origin.
2
amb: 0.3 0.3 0.3
sphere
pos: 0.0 0.0 -3.0
rad: 1
dif: 0.3 0.3 0.3
spe: 0.5 0.5 0.5
shi: 1
light
pos: 0 0 0
col: 1 1 1
Mine looks very weird:
//check ray intersection with the sphere
boolean intersectsWithSphere(struct point rayPosition, struct point rayDirection, Sphere sp, float* t){
    //float a = (rayDirection.x * rayDirection.x) + (rayDirection.y * rayDirection.y) + (rayDirection.z * rayDirection.z);
    // value for a is 1 since rayDirection vector is normalized
    double radius = sp.radius;
    double xc = sp.position[0];
    double yc = sp.position[1];
    double zc = sp.position[2];
    double xo = rayPosition.x;
    double yo = rayPosition.y;
    double zo = rayPosition.z;
    double xd = rayDirection.x;
    double yd = rayDirection.y;
    double zd = rayDirection.z;
    double b = 2 * ((xd*(xo-xc)) + (yd*(yo-yc)) + (zd*(zo-zc)));
    double c = (xo-xc)*(xo-xc) + (yo-yc)*(yo-yc) + (zo-zc)*(zo-zc) - (radius * radius);
    float D = b*b + (-4.0f)*c;
    //ray does not intersect the sphere
    if(D < 0){
        return false;
    }
    D = sqrt(D);
    float t0 = (-b - D)/2;
    float t1 = (-b + D)/2;
    //printf("D=%f",D);
    //printf(" t0=%f",t0);
    //printf(" t1=%f\n",t1);
    if((t0 > 0) && (t1 > 0)){
        *t = min(t0,t1);
        return true;
    } else {
        *t = 0;
        return false;
    }
}
Below is the trace() function:
unsigned char* trace(struct point rayPosition, struct point rayDirection, Sphere * totalspheres) {
    struct point tempRayPosition = rayPosition;
    struct point tempRayDirection = rayDirection;
    float f=0;
    float tnear = INFINITY;
    boolean sphereIntersectionFound = false;
    int sphereIndex = -1;
    for(int i=0; i < num_spheres; i++){
        float t = INFINITY;
        if(intersectsWithSphere(tempRayPosition,tempRayDirection,totalspheres[i],&t)){
            if(t < tnear){
                tnear = t;
                sphereIntersectionFound = true;
                sphereIndex = i;
            }
        }
    }
    if(sphereIndex < 0){
        //printf("No intersection found\n");
        mycolor[0] = 1;
        mycolor[1] = 1;
        mycolor[2] = 1;
        return mycolor;
    }
    else {
        Sphere sp = totalspheres[sphereIndex];
        //intersection point
        hitPoint[0].x = tempRayPosition.x + tempRayDirection.x * tnear;
        hitPoint[0].y = tempRayPosition.y + tempRayDirection.y * tnear;
        hitPoint[0].z = tempRayPosition.z + tempRayDirection.z * tnear;
        //normal at the intersection point
        normalAtHitPoint[0].x = (hitPoint[0].x - totalspheres[sphereIndex].position[0])/ totalspheres[sphereIndex].radius;
        normalAtHitPoint[0].y = (hitPoint[0].y - totalspheres[sphereIndex].position[1])/ totalspheres[sphereIndex].radius;
        normalAtHitPoint[0].z = (hitPoint[0].z - totalspheres[sphereIndex].position[2])/ totalspheres[sphereIndex].radius;
        normalizedNormalAtHitPoint[0] = normalize(normalAtHitPoint[0]);
        for(int j=0; j < num_lights; j++) {
            for(int k=0; k < num_spheres; k++){
                shadowRay[0].x = lights[j].position[0] - hitPoint[0].x;
                shadowRay[0].y = lights[j].position[1] - hitPoint[0].y;
                shadowRay[0].z = lights[j].position[2] - hitPoint[0].z;
                normalizedShadowRay[0] = normalize(shadowRay[0]);
                //R = 2 * (N dot L) * N - L
                reflectionRay[0].x = - 2 * dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]) * normalizedNormalAtHitPoint[0].x + normalizedShadowRay[0].x;
                reflectionRay[0].y = - 2 * dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]) * normalizedNormalAtHitPoint[0].y + normalizedShadowRay[0].y;
                reflectionRay[0].z = - 2 * dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]) * normalizedNormalAtHitPoint[0].z + normalizedShadowRay[0].z;
                normalizeReflectionRay[0] = normalize(reflectionRay[0]);
                struct point temp;
                temp.x = hitPoint[0].x + (shadowRay[0].x * 0.0001);
                temp.y = hitPoint[0].y + (shadowRay[0].y * 0.0001);
                temp.z = hitPoint[0].z + (shadowRay[0].z * 0.0001);
                struct point ntemp = normalize(temp);
                float f=0;
                struct point tempHitPoint;
                tempHitPoint.x = hitPoint[0].x + 0.001;
                tempHitPoint.y = hitPoint[0].y + 0.001;
                tempHitPoint.z = hitPoint[0].z + 0.001;
                if(intersectsWithSphere(hitPoint[0],ntemp,totalspheres[k],&f)){
                    // if(intersectsWithSphere(tempHitPoint,ntemp,totalspheres[k],&f)){
                    printf("In shadow\n");
                    float r = lights[j].color[0];
                    float g = lights[j].color[1];
                    float b = lights[j].color[2];
                    mycolor[0] = ambient_light[0] + r;
                    mycolor[1] = ambient_light[1] + g;
                    mycolor[2] = ambient_light[2] + b;
                    return mycolor;
                } else {
                    // point is not in shadow, use Phong shading to determine the color of the point.
                    //I = lightColor * (kd * (L dot N) + ks * (R dot V) ^ sh)
                    //(for each color channel separately; note that if L dot N < 0, you should clamp L dot N to zero; same for R dot V)
                    float x = dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]);
                    if(x < 0)
                        x = 0;
                    V[0].x = - rayDirection.x;
                    V[0].x = - rayDirection.y;
                    V[0].x = - rayDirection.z;
                    normalizedV[0] = normalize(V[0]);
                    float y = dot(normalizeReflectionRay[0],normalizedV[0]);
                    if(y < 0)
                        y = 0;
                    float ar = totalspheres[sphereIndex].color_diffuse[0] * x;
                    float br = totalspheres[sphereIndex].color_specular[0] * pow(y,totalspheres[sphereIndex].shininess);
                    float r = lights[j].color[0] * (ar+br);
                    //----------------------------------------------------------------------------------
                    float bg = totalspheres[sphereIndex].color_specular[1] * pow(y,totalspheres[sphereIndex].shininess);
                    float ag = totalspheres[sphereIndex].color_diffuse[1] * x;
                    float g = lights[j].color[1] * (ag+bg);
                    //----------------------------------------------------------------------------------
                    float bb = totalspheres[sphereIndex].color_specular[2] * pow(y,totalspheres[sphereIndex].shininess);
                    float ab = totalspheres[sphereIndex].color_diffuse[2] * x;
                    float b = lights[j].color[2] * (ab+bb);
                    mycolor[0] = r + ambient_light[0];
                    mycolor[1] = g + ambient_light[1];
                    mycolor[2] = b + ambient_light[2];
                    return mycolor;
                }
            }
        }
    }
}
The code calling trace() looks like :
void draw_scene()
{
//Aspect Ratio
double a = WIDTH / HEIGHT;
double angel = tan(M_PI * 0.5 * fov/ 180);
ray[0].x = 0.0;
ray[0].y = 0.0;
ray[0].z = 0.0;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned int x,y;
float sx, sy;
for(x=0;x < WIDTH;x++)
{
glPointSize(2.0);
glBegin(GL_POINTS);
for(y=0;y < HEIGHT;y++)
{
sx = (((x + 0.5) / WIDTH) * 2.0 ) - 1;
sy = (((y + 0.5) / HEIGHT) * 2.0 ) - 1;;
sx = sx * angel * a;
sy = sy * angel;
//set ray direction
ray[1].x = sx;
ray[1].y = sy;
ray[1].z = -1;
normalizedRayDirection[0] = normalize(ray[1]);
unsigned char* color = trace(ray[0],normalizedRayDirection[0],spheres);
unsigned char x1 = color[0] * 255;
unsigned char y1 = color[1] * 255;
unsigned char z1 = color[2] * 255;
plot_pixel(x,y,x1 %256,y1%256,z1%256);
}
glEnd();
glFlush();
}
}
There could be many, many problems with the code/understanding.
I haven't taken the time to understand all your code, and I'm definitely not a graphics expert, but I believe the problem you have is called "surface acne". In this case it's probably happening because your shadow rays are intersecting with the object itself. What I did in my code to fix this is add epsilon * hitPoint.normal to the shadow ray origin. This effectively moves the ray away from your object a bit, so they don't intersect.
The value I'm using for epsilon is the square root of 1.19209290 × 10^-7; that number is a constant called EPSILON (the single-precision machine epsilon) in the particular language I'm using.
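In terms of the question's own variables, that fix might look like this sketch (FLT_EPSILON from <float.h> is the C analogue of the constant mentioned above):

#include <float.h>
#include <math.h>

const float eps = sqrtf(FLT_EPSILON); /* about 3.45e-4 */
struct point shadowOrigin;
/* nudge the shadow-ray origin off the surface along the normal */
shadowOrigin.x = hitPoint[0].x + eps * normalizedNormalAtHitPoint[0].x;
shadowOrigin.y = hitPoint[0].y + eps * normalizedNormalAtHitPoint[0].y;
shadowOrigin.z = hitPoint[0].z + eps * normalizedNormalAtHitPoint[0].z;
/* then test: intersectsWithSphere(shadowOrigin, normalizedShadowRay[0], totalspheres[k], &f) */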
What possible reason do you have for doing this (in the non-shadow branch of trace (...)):
V[0].x = - rayDirection.x;
V[0].x = - rayDirection.y;
V[0].x = - rayDirection.z;
You might as well comment out the first two computations since you write the results of each to the same component. I think you probably meant to do this instead:
V[0].x = - rayDirection.x;
V[0].y = - rayDirection.y;
V[0].z = - rayDirection.z;
That said, you should also avoid using GL_POINTS primitives to cover a 2x2 pixel quad. Point primitives are not guaranteed to be square, and OpenGL implementations are not required to support any size other than 1.0. In practice, most support 1.0 through ~64.0, but glDrawPixels (...) is a much better way of writing 2x2 pixels, since it skips primitive assembly and the above-mentioned limitations. You are using immediate mode in this example anyway, so glRasterPos (...) and glDrawPixels (...) are still a valid approach.
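A sketch of that alternative, assuming an illustrative WIDTH x HEIGHT x 3 byte buffer named framebuffer that the inner loop fills instead of calling plot_pixel():

glRasterPos2f(-1.0f, -1.0f); /* lower-left corner under the default matrices */
glDrawPixels(WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, framebuffer);
glFlush();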
It seems you are implementing the formula here, but you deviate at the end from the direction the article takes.
First, the article warns that D and b can be very close in value, so computing -b + D suffers catastrophic cancellation and leaves you with very few significant digits. They suggest an alternative.
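That alternative, in terms of the question's variables after D = sqrt(D) (valid here because a == 1 for a normalized direction): compute the larger-magnitude root first, then recover the other from the product of the roots.

float q = (b > 0) ? -0.5f * (b + D) : -0.5f * (b - D);
float t0 = q;     /* this root is computed without cancellation */
float t1 = c / q; /* follows from t0 * t1 == c/a == c */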
Also, you are testing that both t0 and t1 > 0. This doesn't have to be true for you to hit the sphere; you could be inside it (though you obviously should not be in your test scene).
Finally, I would add a test at the beginning to confirm that the direction vector is normalized. I've messed that up more than once in my renderers.

Assign multiple values to array in C

Is there any way to do this in a condensed form?
GLfloat coordinates[8];
...
coordinates[0] = 1.0f;
coordinates[1] = 0.0f;
coordinates[2] = 1.0f;
coordinates[3] = 1.0f;
coordinates[4] = 0.0f;
coordinates[5] = 1.0f;
coordinates[6] = 0.0f;
coordinates[7] = 0.0f;
return coordinates;
Something like coordinates = {1.0f, ...};?
If you really need to assign values (as opposed to initialize), you can do it like this:
GLfloat coordinates[8];
static const GLfloat coordinates_defaults[8] = {1.0f, 0.0f, 1.0f ....};
...
memcpy(coordinates, coordinates_defaults, sizeof(coordinates_defaults));
return coordinates;
Although in your case plain initialization will do, there is a trick: wrap the array into a struct (which can be initialized after declaration).
For example:
struct foo {
    GLfloat arr[10];
};
...
struct foo foo;
foo = (struct foo) { .arr = {1.0, ...} };
The old-school way:
GLfloat coordinates[8];
...
GLfloat *p = coordinates;
*p++ = 1.0f; *p++ = 0.0f; *p++ = 1.0f; *p++ = 1.0f;
*p++ = 0.0f; *p++ = 1.0f; *p++ = 0.0f; *p++ = 0.0f;
return coordinates;
You can use:
GLfloat coordinates[8] = {1.0f, ..., 0.0f};
but this is a compile-time initialisation - you can't use that method in the current standard to re-initialise (although I think there are ways to do it in the upcoming standard, which may not immediately help you).
The other two ways that spring to mind are to blat the contents if they're fixed:
GLfloat base_coordinates[8] = {1.0f, ..., 0.0f};
GLfloat coordinates[8];
:
memcpy (coordinates, base_coordinates, sizeof (coordinates));
or provide a function that looks like your initialisation code anyway:
void setCoords (float *p0, float p1, ..., float p8) {
    p0[0] = p1; p0[1] = p2; p0[2] = p3; p0[3] = p4;
    p0[4] = p5; p0[5] = p6; p0[6] = p7; p0[7] = p8;
}
:
setCoords (coordinates, 1.0f, ..., 0.0f);
keeping in mind those ellipses (...) are placeholders, not things to literally insert in the code.
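One more option on compilers with C99 support: copy from a compound literal with memcpy (a sketch, not from the answer above):

#include <string.h>

GLfloat coordinates[8];
/* the compound literal is an unnamed array object; memcpy copies it in */
memcpy(coordinates,
       (const GLfloat[8]){1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f},
       sizeof coordinates);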
I went with an array initialization method:
#include <stdarg.h>

void int_array_init(int *a, const int ct, ...) {
    va_list args;
    va_start(args, ct);
    for(int i = 0; i < ct; ++i) {
        a[i] = va_arg(args, int);
    }
    va_end(args);
}
called like,
const int node_ct = 8;
int expected[node_ct];
int_array_init(expected, node_ct, 1, 3, 4, 2, 5, 6, 7, 8);
The C99 array initialization, like this:
const int node_ct = 8;
const int expected[node_ct] = { 1, 3, 4, 2, 5, 6, 7, 8 };
And in the configure.ac:
AC_PROG_CC_C99
had the compiler on my dev box perfectly happy. The compiler on the server complained with:
error: variable-sized object may not be initialized
const int expected[node_ct] = { 1, 3, 4, 2, 5, 6, 7, 8 };
and, for each element,
warning: excess elements in array initializer
const int expected[node_ct] = { 1, 3, 4, 2, 5, 6, 7, 8 };
It doesn't complain at all about, for example:
int expected[] = { 1, 2, 3, 4, 5 };
I like the check on size, and that the varargs support is acting more robustly than the support for the array initializer.
Find PR with sample code at https://github.com/wbreeze/davenport/pull/15/files
Regarding https://stackoverflow.com/a/3535455/608359 from @paxdiablo: I liked it, but felt insecure about keeping the number of times the initialization pointer advances in sync with the number of elements allocated to the array. Worst case, the initializing pointer moves beyond the allocated length. As such, the diff in the PR contains,
int expected[node_ct];
- int *p = expected;
- *p++ = 1; *p++ = 2; *p++ = 3; *p++ = 4;
+ int_array_init(expected, node_ct, 1, 2, 3, 4);
The int_array_init method will safely assign junk if the number of arguments is fewer than node_ct. The junk assignment ought to be easier to catch and debug.
Exactly, you nearly got it:
GLfloat coordinates[8] = {1.0f, ..., 0.0f};
If you are doing these same assignments a lot in your program and want a shortcut, the most straightforward solution might be to just add a function
static inline void set_coordinates(
    GLfloat coordinates[static 8],
    GLfloat c0, GLfloat c1, GLfloat c2, GLfloat c3,
    GLfloat c4, GLfloat c5, GLfloat c6, GLfloat c7)
{
    coordinates[0] = c0;
    coordinates[1] = c1;
    coordinates[2] = c2;
    coordinates[3] = c3;
    coordinates[4] = c4;
    coordinates[5] = c5;
    coordinates[6] = c6;
    coordinates[7] = c7;
}
and then simply call
GLfloat coordinates[8];
// ...
set_coordinates(coordinates, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f);
#include <stdio.h>

typedef struct {
    char array[4];
} my_array;

my_array array = { .array = {1,1,1,1} }; // initialisation

void assign(my_array a)
{
    array.array[0] = a.array[0];
    array.array[1] = a.array[1];
    array.array[2] = a.array[2];
    array.array[3] = a.array[3];
}

char num = 5;
char ber = 6;

int main(void)
{
    printf("%d\n", array.array[0]);
    // ...
    // this works even after initialisation
    assign((my_array){ .array = {num,ber,num,ber} });
    printf("%d\n", array.array[0]);
    // ....
    return 0;
}
