What is wrong with my 2D interpolation C code?

#include <GL/glut.h>
#include <GL/gl.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 200
typedef struct vertex{
float x; // x position of the point
float y; // y position of the point
float r; // red color component of the point
float g; // green color component of the point
float b; // blue color component of the point
char isVisited;
}Vertex;
Vertex *borderLines,*interPolationLines;
int vertex_Count;// total vertex
int counter;//counts matched y coordinates
FILE *f,*g;
void readTotalVertexCount(){
if((f = fopen("vertex.txt","r"))==NULL){
printf("File could not been read\n");
return ;
}
fscanf(f,"%d",&vertex_Count);
/*if((g = fopen("points.txt","w"))==NULL){
return ;
}*/
}
void readVertexCoordinatesFromFile(){
Vertex v[vertex_Count];
borderLines = (Vertex *)calloc(N*vertex_Count,sizeof(Vertex));
interPolationLines = (Vertex *)calloc(N*N*(vertex_Count-1),sizeof(Vertex));
int i = 0;int j;
//read vertexes from file
while(i<vertex_Count){
fscanf(f,"%f",&(v[i].x));
fscanf(f,"%f",&(v[i].y));
fscanf(f,"%f",&(v[i].r));
fscanf(f,"%f",&(v[i].g));
fscanf(f,"%f",&(v[i].b));
//printf("%f %f \n",v[i].x,v[i].y);
i++;
}
Vertex *borderLine,*temp;
float k,landa;
// draw border line actually I am doing 1D Interpolation with coordinates of my vertexes
for (i = 0;i < vertex_Count;i++){
int m = i+1;
if(m==vertex_Count)
m = 0;
borderLine = borderLines + i*N;
for(j = 0;j < N; j++){
k = (float)j/(N - 1);
temp = borderLine + j;
landa = 1-k;
//finding 1D interpolation coord. actually they are borders of my convex polygon
temp->x = v[i].x*landa + v[m].x*k;
temp->y = v[i].y*landa + v[m].y*k;
temp->r = v[i].r*landa + v[m].r*k;
temp->g = v[i].g*landa + v[m].g*k;
temp->b = v[i].b*landa + v[m].b*k;
temp->isVisited = 'n'; // I didn't visit this point yet
//fprintf(g,"%f %f %f %f %f\n",temp->x,temp->y,temp->r,temp->g,temp->b);
}
}
/* here is actual place I am doing 2D Interpolation
I am traversing along the border of the convex polygon and finding the points have the same y coordinates
Between those two points have same y coord. I am doing 1D Interpolation*/
int a;counter = 0;
Vertex *searcherBorder,*wantedBorder,*interPolationLine;
int start = N*(vertex_Count); int finish = N*vertex_Count;
for(i = 0;i< start ;i++){
searcherBorder = i + borderLines;
for(j = i - i%N + N +1; j< finish; j++){
wantedBorder = j + borderLines;
if((searcherBorder->y)==(wantedBorder->y) && searcherBorder->isVisited=='n' && wantedBorder->isVisited=='n'){
//these points have been visited
searcherBorder->isVisited = 'y';
wantedBorder->isVisited = 'y';
interPolationLine = interPolationLines + counter*N;
//counter variable counts the points have same y coordinates.
counter++;
//printf("%d %d %d\n",i,j,counter);
//same as 1D interpolation
for(a= 0;a< N;a++){
k = (float)a/(N - 1);
temp = interPolationLine + a;
landa = 1-k;
temp->x = (wantedBorder->x)*landa + (searcherBorder->x)*k;
temp->y = (wantedBorder->y)*landa + (searcherBorder->y)*k;
temp->r = (wantedBorder->r)*landa + (searcherBorder->r)*k;
temp->g = (wantedBorder->g)*landa + (searcherBorder->g)*k;
/*if(temp->x==temp->y)
printf("%f %f \n",wantedBorder->x,searcherBorder->x);*/
temp->b = (wantedBorder->b)*landa + (searcherBorder->b)*k;
}
}
}
}
fclose(f);
}
void display(void){
glClear(GL_COLOR_BUFFER_BIT);
glColor3f(1.0,1.0,1.0);
int i,j;
Vertex *interPol,*temp;
glBegin (GL_POINTS);
for(i = 0;i< counter;i++){
interPol = interPolationLines + i*N;
for(j = 0;j< N;j++){
temp = interPol + j;
glColor3f((temp)->r,(temp)->g,(temp)->b);
//fprintf(g,"%f %f \n",(temp)->x,(temp)->y);
glVertex2f ((temp)->x,(temp)->y);
}
}
//printf("%d\n",counter);
fclose(g);
glEnd ();
glFlush();
}
void init(void){
glutInitDisplayMode( GLUT_RGB | GLUT_SINGLE);
glutInitWindowSize(900,500);
glutInitWindowPosition(200,100);
glutCreateWindow("2D InterPolation");
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
glShadeModel(GL_SMOOTH);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
}
int main(int argc, char** argv)
{
readTotalVertexCount();
readVertexCoordinatesFromFile();
glutInit(&argc,argv);
init();
glutDisplayFunc(display);
glutMainLoop();
return 0;
}
I am implementing 2D interpolation over a convex polygon; my code does not handle concave polygons. It works for some convex polygons but fails for others: in the failing cases it does not draw the middle of the polygon, only an upper and a lower triangle. It reads the vertices from the file vertex.txt, whose format is the vertex count followed by one line per vertex giving the x coordinate, y coordinate, and the red, green and blue color components of that point, like the example below. For the values below my code fails. Thanks in advance for any replies; this is driving me mad.
7
0.9 0.4 1.0 0.0 1.0
0.8 0.2 1.0 0.0 1.0
0.5 0.1 1.0 0.0 0.0
0.3 0.3 0.0 0.0 1.0
0.3 0.35 0.0 0.0 1.0
0.4 0.4 0.0 1.0 0.0
0.6 0.5 1.0 1.0 1.0

Without fully debugging your program, I'm suspicious of the line that says for(j = i - i%N + N +1; j< finish; j++). I don't know exactly what you're intending to do there, but it just looks suspicious. Furthermore, I would recommend a different algorithm:
Trace around the polygon
Mark any edges that span the desired y-value
Corner cases aside, there's only a solution if you find exactly two hits.
Calculate the intersection of the edges with the y-value
Perform the x-interpolation
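For illustration, here is a minimal, untested sketch of that scanline idea in C. It reuses the Vertex struct from the question; drawSpan() is a hypothetical output routine standing in for the final 1D interpolation/drawing step between the two hits.
/* Sketch of the suggested scanline approach (assumes a convex polygon with vertices in order). */
void scanConvexPolygon(const Vertex *v, int n, int rows)
{
    /* vertical extent of the polygon */
    float ymin = v[0].y, ymax = v[0].y;
    int i, row;
    for (i = 1; i < n; i++) {
        if (v[i].y < ymin) ymin = v[i].y;
        if (v[i].y > ymax) ymax = v[i].y;
    }
    for (row = 0; row < rows; row++) {
        float y = ymin + (ymax - ymin) * row / (rows - 1);
        Vertex hit[2]; /* a convex polygon crosses a scanline at most twice */
        int hits = 0;
        /* mark every edge that spans this y value and compute the intersection */
        for (i = 0; i < n && hits < 2; i++) {
            const Vertex *a = &v[i], *b = &v[(i + 1) % n];
            if ((a->y <= y && b->y > y) || (b->y <= y && a->y > y)) {
                float t = (y - a->y) / (b->y - a->y); /* position along the edge */
                hit[hits].x = a->x + t * (b->x - a->x);
                hit[hits].y = y;
                hit[hits].r = a->r + t * (b->r - a->r);
                hit[hits].g = a->g + t * (b->g - a->g);
                hit[hits].b = a->b + t * (b->b - a->b);
                hits++;
            }
        }
        if (hits == 2)
            drawSpan(&hit[0], &hit[1]); /* hypothetical: 1D interpolation in x between the two hits */
    }
}
Unlike the exact y-comparison in the question, this never relies on two sampled border points having bit-identical y values.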
Also, concise questions are better than, "Why doesn't my program work?" Forgive me but it feels like a homework problem.
Note: Should this be a comment instead of an answer? I'm new here...

Related

Displaying in OpenGL based on a matrix

I am trying to draw some shapes in the OpenGL window. I draw these shapes based on the values in a particular matrix. I am using GLUT, which has a function glutDisplayFunc that takes one parameter: a callback function that takes no arguments and returns void. But I need to draw an image in the window based on a matrix, which I cannot pass to that callback.
This is an example code
#include<stdio.h>
#include<GL/glut.h>
#include<math.h>
#define pi 3.142857
void mat()
{
int a[2][2];
//
for(int i=0;i<2;i++)
for (int j = 0; j < 2; ++j)
{
scanf("%d",&a[i][j]);
}
}
// function to initialize
void myInit (void)
{
glClearColor(0.0, 0.0, 0.0, 1.0);
glColor3f(0.0, 1.0, 0.0);
glPointSize(1.0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(-780, 780, -420, 420);
}
void display (void)
{
glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_POINTS);
float x, y, i;
for ( i = 0; i < (2 * pi); i += 0.001)
{
x = 200 * cos(i);
y = 200 * sin(i);
glVertex2i(x, y);
}
glEnd();
glFlush();
}
int main (int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
// giving window size in X- and Y- direction
glutInitWindowSize(1366, 768);
glutInitWindowPosition(0, 0);
glutCreateWindow("Circle Drawing");
myInit();
glutDisplayFunc(display);
glutMainLoop();
}
I need to be able to use the matrix a in function mat to define the center of 2 circles. How do I draw the window from within the mat function?
Edit: included code and fixed some typos
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT);
//-----------
float a[4][4] = {
1,0,0,0,
0,1,0,0,
0,0,1,0,
0,0,0,1 };
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glLoadMatrixf((float*)a);
//----------
glBegin(GL_POINTS);
float x, y, i;
for (i = 0; i < (2 * pi); i += 0.001)
{
x = 200 * cos(i);
y = 200 * sin(i);
glVertex2i(x, y);
}
glEnd();
glFlush();
}
In general you can load the current model view matrix by setting the GL_MODELVIEW matrix mode (glMatrixMode) and loading the matrix with glLoadMatrixf.
Optionally, the matrix can be multiplied onto the current matrix by glMultMatrix.
But in both cases the matrix has to be a 4x4 transformation matrix. The parameter to both functions is a pointer to an array of 16 floats, or equivalently a 2-dimensional 4x4 float array.
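For illustration (a sketch only, not part of the program above; tx and ty are assumed example values), a pure translation by (tx, ty, 0) laid out the way glLoadMatrixf expects. OpenGL matrices are column-major, so each row of the C array is one column and the translation ends up in a[3]:
float tx = 1.0f, ty = 2.0f; /* assumed example values */
float a[4][4] = {
    { 1.0f, 0.0f, 0.0f, 0.0f }, /* column 0 */
    { 0.0f, 1.0f, 0.0f, 0.0f }, /* column 1 */
    { 0.0f, 0.0f, 1.0f, 0.0f }, /* column 2 */
    { tx,   ty,   0.0f, 1.0f }  /* column 3: translation */
};
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(&a[0][0]);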
Initialize a 4x4 identity matrix and set its upper-left 2x2 part to get a rotation matrix around the z-axis.
Further, I recommend reading a rotation angle in degrees and computing the rotation components with the trigonometric functions sin and cos.
Finally, read the x and y translation components:
#define _USE_MATH_DEFINES
#include <math.h>
float a[4][4];
void mat()
{
// init identity matrix
for(int i = 0; i < 4; i++)
for (int j = 0; j < 4; ++j)
a[i][j] = (i==j) ? 1.0f : 0.0f;
// read the angle in degrees
float angle_degree;
scanf("%f", &angle_degree);
// convert the angle to radian
float angle_radiant = angle_degree * (float)M_PI / 180.0f;
// set rotation around z-axis
float cos_ang = cos(angle_radiant);
float sin_ang = sin(angle_radiant);
a[0][0] = cos_ang;
a[0][1] = -sin_ang;
a[1][0] = sin_ang;
a[1][1] = cos_ang;
// read translation
scanf("%f", &a[3][0]);
scanf("%f", &a[3][1]);
}
void display (void)
{
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(&a[0][0]);
// [...]
}
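One way to wire this together, based on the main from the question (a sketch, not tested): call mat() once before entering the GLUT main loop, so that display() can load the global matrix a.
int main (int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowSize(1366, 768);
    glutInitWindowPosition(0, 0);
    glutCreateWindow("Circle Drawing");
    myInit();
    mat();                    /* read angle and translation, fill the global matrix a */
    glutDisplayFunc(display); /* display() loads a into GL_MODELVIEW and draws */
    glutMainLoop();
}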

Ray tracing a Hemisphere

I am currently working on a basic ray-tracing program in C, and I have managed to do some simple shapes (sphere, box, plane, cone, ...), and I have also shaded them using Phong illumination.
But I can't get the hang of how to ray trace a hemisphere. Is there a set equation that defines a hemisphere? If so, please enlighten me on it, because I couldn't find one. Or is there a set method to do it that I couldn't figure out?
I have also tried to cut the sphere with a plane and only show the top half, but it didn't work (I am still new to all this, so my understanding may be wrong).
Edit: OK, I am sorry because I am really new to all this, but here is what I have tried.
#include "raytacing.h"
t_env *init_sphere(t_env *e)
{
//sphere position and radius
e->sph.posi.x = 0;
e->sph.posi.y = 0;
e->sph.posi.z = -1;
e->sph.rad = 0;
e->sph.color = (t_color){255, 255, 128};
return (e);
}
t_env *init_plane(t_env *e)
{
//plane position
e->plane.posi.x = 0;
e->plane.posi.y = -0.5;
e->plane.posi.z = 0;
//plane normal
e->plane.norm.x = 0;
e->plane.norm.y = 1;
e->plane.norm.z = 0;
e->plane.color = (t_color){0, 255, 0};
return (e);
}
double inter_plane(t_env *e, double *t) //calculating plane intersection
{
t_vect dist;
double norm;
norm = dot(e->plane.normal, e->r.direction);
if (fabs(norm) > 1e-6)
{
dist = vect_sub(e->plane.posi, e->r.start);
e->t0 = dot(dist, e->plane.normal) / norm;
if (e->t0 < *t && e->t0 > 1e-6)
{
*t = e->t0;
return (1);
}
else
return (0);
}
return (0);
}
double inter_sph(t_env *e, double *t) //calculating sphere intersection
{
double delta;
double sqrtd;
t_vect dist;
e->a = dot(e->r.direction, e->r.direction);
dist = vect_sub(e->r.start, e->sph.posi);
e->b = 2 * dot(dist, e->r.direction);
e->c = dot(dist, dist) - e->sph.rad * e->sph.rad;
delta = e->b * e->b - 4 * e->a * e->c;
if (delta < 0)
return (0);
sqrtd = sqrt(delta);
e->t0 = (-e->b + sqrtd) / (2 * e->a);
e->t1 = (-e->b - sqrtd) / (2 * e->a);
if (e->t0 > e->t1)
e->t0 = e->t1;
if ((e->t0 > 1e-6) && (e->t0 < *t))
{
*t = e->t0;
return (1);
}
else
return (0);
}
double inter_hemisphere(t_env *e) //calculating hemisphere intersection
{
t_vect hit_normal;
if (inter_sph(e, &e->t) == 1)
{
hit_normal = vect_add(e->r.start, vect_scalaire(e->t, e->r.direction));
hit_normal = vect_normalize(hit_normal);
if (inter_plane(e, &(e->t)) == 1)
{
if (dot(e->plane.normal, hit_normal) < 0)
return (1);
return (0);
}
}
return (0);
}
The e->t is supposed to be the closest distance to the camera, so that close and far objects are displayed correctly.
And here I tried to apply what Spektre said; I got something displayed that looks like this:
And when I try to rotate it I get this:
Edit 2: After using Spektre's method I got a working hemisphere intersection, and it looks something like this.
double inter_hemisphere(t_env *e, double *t)
{
double delta;
double sqrtd;
t_vect dist;
e->a = dot(e->r.direction, e->r.direction);
dist = vect_sub(e->r.start, e->sph.posi);
e->b = 2 * dot(dist, e->r.direction);
e->c = dot(dist, dist) - e->sph.rad * e->sph.rad;
delta = e->b * e->b - 4 * e->a * e->c;
if (delta < 0)
return (0);
sqrtd = sqrt(delta);
e->t0 = (-e->b + sqrtd) / (2 * e->a);
e->t1 = (-e->b - sqrtd) / (2 * e->a);
t_vect v2;
v2 = vect_add(e->r.start, vect_sub(vect_scalaire(e->t0, e->r.direction), e->sph.posi));
if (dot(e->plane.normal, v2) > 0.0)
e->t0 =-1.0;
v2 = vect_add(e->r.start, vect_sub(vect_scalaire(e->t1, e->r.direction), e->sph.posi));
if (dot(e->plane.normal, v2) > 0.0)
e->t1 =-1.0;
if (e->t0 < 0.0)
e->t0 = e->t1;
if (e->t1 < 0.0)
e->t1 = e->t0;
double tt;
tt = fmin(e->t0, e->t1);
if (tt <= 0.0)
tt = fmax(e->t0, e->t1);
if (tt > 1e-6 && tt < e->t)
{
*t = tt;
return (1);
}
return (0);
}
And here is the Result:
The simplest way is to cut your sphere with a plane.
If you have the plane normal, then any point on the sphere whose direction (point on sphere - sphere center) points the same way as the normal is cut off. Simply use this condition:
dot(point on sphere - sphere center, plane normal) > 0.0
But do not forget to test both intersections of the ray and the sphere, as the closest one can be on the other side of the plane ...
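In plain C, using the helper names from the question's code (this is only a sketch; I am assuming the plane's normal field is plane.normal, as in inter_plane), the test for one hit could look like this:
/* returns 1 if the sphere hit at ray distance t_hit should be kept,
   0 if it lies on the cut-off side of the plane */
int keep_hemisphere_hit(t_env *e, double t_hit)
{
    t_vect hit;
    t_vect rel;
    /* hit position along the ray, then relative to the sphere center */
    hit = vect_add(e->r.start, vect_scalaire(t_hit, e->r.direction));
    rel = vect_sub(hit, e->sph.posi);
    /* dot(point on sphere - sphere center, plane normal) > 0 means cut off */
    return (dot(e->plane.normal, rel) <= 0.0);
}
You would run this for both intersection distances, discard any hit for which it returns 0, and then keep the closer remaining hit; that is exactly what the two added lines in the GLSL code further down do.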
I tried to implement this in my GLSL ray tracer:
Reflection and refraction impossible without recursive ray tracing?
And came up with these updated shaders:
Vertex (no change):
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform float aspect;
uniform float focal_length;
uniform mat4x4 tm_eye;
layout(location=0) in vec2 pos;
out smooth vec2 txt_pos; // frag position on screen <-1,+1> for debug prints
out smooth vec3 ray_pos; // ray start position
out smooth vec3 ray_dir; // ray start direction
//------------------------------------------------------------------
void main(void)
{
vec4 p;
txt_pos=pos;
// perspective projection
p=tm_eye*vec4(pos.x/aspect,pos.y,0.0,1.0);
ray_pos=p.xyz;
p-=tm_eye*vec4(0.0,0.0,-focal_length,1.0);
ray_dir=normalize(p.xyz);
gl_Position=vec4(pos,0.0,1.0);
}
//------------------------------------------------------------------
Fragment (added hemispheres):
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
// Ray tracer ver: 1.000
//------------------------------------------------------------------
in smooth vec3 ray_pos; // ray start position
in smooth vec3 ray_dir; // ray start direction
uniform float n0; // refractive index of camera origin
uniform int fac_siz; // square texture x,y resolution size
uniform int fac_num; // number of valid floats in texture
uniform sampler2D fac_txr; // scene mesh data texture
out layout(location=0) vec4 frag_col;
//---------------------------------------------------------------------------
#define _reflect
#define _refract
//---------------------------------------------------------------------------
void main(void)
{
const vec3 light_dir=normalize(vec3(0.1,0.1,1.0));
const float light_iamb=0.1; // dot offset
const float light_idir=0.5; // directional light amplitude
const vec3 back_col=vec3(0.2,0.2,0.2); // background color
const float _zero=1e-6; // to avoid intrsection with start point of ray
const int _fac_triangles =0; // r,g,b,a, n, triangle count, { x0,y0,z0,x1,y1,z1,x2,y2,z2 }
const int _fac_spheres =1; // r,g,b,a, n, sphere count, { x,y,z,r }
const int _fac_hemispheres=2; // r,g,b,a, n, hemisphere count,{ x,y,z,r,nx,ny,nz }
// ray scene intersection
struct _ray
{
dvec3 pos,dir,nor;
vec3 col;
float refl,refr;// reflection,refraction intensity coeficients
float n0,n1; // refaction index (start,end)
double l; // ray length
int lvl,i0,i1; // recursion level, reflect, refract
};
const int _lvls=4;
const int _rays=(1<<_lvls)-1;
_ray ray[_rays]; int rays;
dvec3 v0,v1,v2,pos;
vec3 c;
float refr,refl,n1;
double tt,t,a;
int i0,ii,num,id;
// fac texture access
vec2 st; int i,j; float ds=1.0/float(fac_siz-1);
#define fac_get texture(fac_txr,st).r; st.s+=ds; i++; j++; if (j==fac_siz) { j=0; st.s=0.0; st.t+=ds; }
// enque start ray
ray[0].pos=ray_pos;
ray[0].dir=normalize(ray_dir);
ray[0].nor=vec3(0.0,0.0,0.0);
ray[0].refl=0.0;
ray[0].refr=0.0;
ray[0].n0=n0;
ray[0].n1=1.0;
ray[0].l =0.0;
ray[0].lvl=0;
ray[0].i0=-1;
ray[0].i1=-1;
rays=1;
// loop all enqued rays
for (i0=0;i0<rays;i0++)
{
// loop through all objects
// find closest forward intersection between them and ray[i0]
// strore it to ray[i0].(nor,col)
// strore it to pos,n1
t=tt=-1.0; ii=1; ray[i0].l=0.0;
ray[i0].col=back_col;
pos=ray[i0].pos; n1=n0;
for (st=vec2(0.0,0.0),i=j=0;i<fac_num;)
{
c.r=fac_get; // RGBA
c.g=fac_get;
c.b=fac_get;
refl=fac_get;
refr=fac_get;
n1=fac_get; // refraction index
a=fac_get; id=int(a); // object type
a=fac_get; num=int(a); // face count
if (id==_fac_triangles)
for (;num>0;num--)
{
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get;
v1.x=fac_get; v1.y=fac_get; v1.z=fac_get;
v2.x=fac_get; v2.y=fac_get; v2.z=fac_get;
dvec3 e1,e2,n,p,q,r;
double t,u,v,det,idet;
//compute ray triangle intersection
e1=v1-v0;
e2=v2-v0;
// Calculate planes normal vector
p=cross(ray[i0].dir,e2);
det=dot(e1,p);
// Ray is parallel to plane
if (abs(det)<1e-8) continue;
idet=1.0/det;
r=ray[i0].pos-v0;
u=dot(r,p)*idet;
if ((u<0.0)||(u>1.0)) continue;
q=cross(r,e1);
v=dot(ray[i0].dir,q)*idet;
if ((v<0.0)||(u+v>1.0)) continue;
t=dot(e2,q)*idet;
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// barycentric interpolate position
t=1.0-u-v;
pos=(v0*t)+(v1*u)+(v2*v);
// compute normal (store as dir for now)
e1=v1-v0;
e2=v2-v1;
ray[i0].nor=cross(e1,e2);
}
}
if (id==_fac_spheres)
for (;num>0;num--)
{
float r;
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get; r=fac_get;
// compute l0 length of ray(p0,dp) to intersection with sphere(v0,r)
// where rr= r^-2
double aa,bb,cc,dd,l0,l1,rr;
dvec3 p0,dp;
p0=ray[i0].pos-v0; // set sphere center to (0,0,0)
dp=ray[i0].dir;
rr = 1.0/(r*r);
aa=2.0*rr*dot(dp,dp);
bb=2.0*rr*dot(p0,dp);
cc= rr*dot(p0,p0)-1.0;
dd=((bb*bb)-(2.0*aa*cc));
if (dd<0.0) continue;
dd=sqrt(dd);
l0=(-bb+dd)/aa;
l1=(-bb-dd)/aa;
if (l0<0.0) l0=l1;
if (l1<0.0) l1=l0;
t=min(l0,l1); if (t<=_zero) t=max(l0,l1);
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// position,normal
pos=ray[i0].pos+(ray[i0].dir*t);
ray[i0].nor=pos-v0;
}
}
if (id==_fac_hemispheres)
for (;num>0;num--)
{
float r;
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get; r=fac_get;
v1.x=fac_get; v1.y=fac_get; v1.z=fac_get;
// compute l0 length of ray(p0,dp) to intersection with sphere(v0,r)
// where rr= r^-2
double aa,bb,cc,dd,l0,l1,rr;
dvec3 p0,dp;
p0=ray[i0].pos-v0; // set sphere center to (0,0,0)
dp=ray[i0].dir;
rr = 1.0/(r*r);
aa=2.0*rr*dot(dp,dp);
bb=2.0*rr*dot(p0,dp);
cc= rr*dot(p0,p0)-1.0;
dd=((bb*bb)-(2.0*aa*cc));
if (dd<0.0) continue;
dd=sqrt(dd);
l0=(-bb+dd)/aa;
l1=(-bb-dd)/aa;
// test both hits-v0 against normal v1
v2=ray[i0].pos+(ray[i0].dir*l0)-v0; if (dot(v1,v2)>0.0) l0=-1.0;
v2=ray[i0].pos+(ray[i0].dir*l1)-v0; if (dot(v1,v2)>0.0) l1=-1.0;
if (l0<0.0) l0=l1;
if (l1<0.0) l1=l0;
t=min(l0,l1); if (t<=_zero) t=max(l0,l1);
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// position,normal
pos=ray[i0].pos+(ray[i0].dir*t);
ray[i0].nor=pos-v0;
}
}
}
ray[i0].l=tt;
ray[i0].nor=normalize(ray[i0].nor);
// split ray from pos and ray[i0].nor
if ((ii==0)&&(ray[i0].lvl<_lvls-1))
{
t=dot(ray[i0].dir,ray[i0].nor);
// reflect
#ifdef _reflect
if ((ray[i0].refl>_zero)&&(t<_zero)) // do not reflect inside objects
{
ray[i0].i0=rays;
ray[rays]=ray[i0];
ray[rays].lvl++;
ray[rays].i0=-1;
ray[rays].i1=-1;
ray[rays].pos=pos;
ray[rays].dir=ray[rays].dir-(2.0*t*ray[rays].nor);
ray[rays].n0=ray[i0].n0;
ray[rays].n1=ray[i0].n0;
rays++;
}
#endif
// refract
#ifdef _refract
if (ray[i0].refr>_zero)
{
ray[i0].i1=rays;
ray[rays]=ray[i0];
ray[rays].lvl++;
ray[rays].i0=-1;
ray[rays].i1=-1;
ray[rays].pos=pos;
t=dot(ray[i0].dir,ray[i0].nor);
if (t>0.0) // exit object
{
ray[rays].n0=ray[i0].n0;
ray[rays].n1=n0;
if (i0==0) ray[i0].n1=n1;
v0=-ray[i0].nor; t=-t;
}
else{ // enter object
ray[rays].n0=n1;
ray[rays].n1=ray[i0].n0;
ray[i0 ].n1=n1;
v0=ray[i0].nor;
}
n1=ray[i0].n0/ray[i0].n1;
tt=1.0-(n1*n1*(1.0-t*t));
if (tt>=0.0)
{
ray[rays].dir=(ray[i0].dir*n1)-(v0*((n1*t)+sqrt(tt)));
rays++;
}
}
#endif
}
else if (i0>0) // ignore last ray if nothing hit
{
ray[i0]=ray[rays-1];
rays--; i0--;
}
}
// back track ray intersections and compute output color col
// lvl is sorted ascending so backtrack from end
for (i0=rays-1;i0>=0;i0--)
{
// directional + ambient light
t=abs(dot(ray[i0].nor,light_dir)*light_idir)+light_iamb;
t*=1.0-ray[i0].refl-ray[i0].refr;
ray[i0].col.rgb*=float(t);
// reflect
ii=ray[i0].i0;
if (ii>=0) ray[i0].col.rgb+=ray[ii].col.rgb*ray[i0].refl;
// refract
ii=ray[i0].i1;
if (ii>=0) ray[i0].col.rgb+=ray[ii].col.rgb*ray[i0].refr;
}
frag_col=vec4(ray[0].col,1.0);
}
//---------------------------------------------------------------------------
The vertex shader just creates the ray position and direction, which are interpolated by the GPU, and then the fragment shader handles each ray (per pixel).
I use this scene:
// init mesh raytracer
ray.gl_init();
ray.beg();
// r g b rfl rfr n
ray.add_material(1.0,0.7,0.1,0.3,0.0,_n_glass); ray.add_hemisphere( 0.0, 0.0, 2.0,0.5, 0.0, 0.0, 1.0);
ray.add_material(1.0,1.0,1.0,0.3,0.0,_n_glass); ray.add_box ( 0.0, 0.0, 6.0,9.0,9.0,0.1);
ray.add_material(1.0,1.0,1.0,0.1,0.8,_n_glass); ray.add_sphere ( 0.0, 0.0, 0.5,0.5);
ray.add_material(1.0,0.1,0.1,0.3,0.0,_n_glass); ray.add_sphere (+2.0, 0.0, 2.0,0.5);
ray.add_material(0.1,1.0,0.1,0.3,0.0,_n_glass); ray.add_box (-2.0, 0.0, 2.0,0.5,0.5,0.5);
ray.add_material(0.1,0.1,1.0,0.3,0.0,_n_glass);
ray.add_tetrahedron
(
0.0, 0.0, 3.0,
-1.0,-1.0, 4.0,
+1.0,-1.0, 4.0,
0.0,+1.0, 4.0
);
ray.end();
containing a single yellow hemisphere at (0.0, 0.0, 2.0) with radius r=0.5 and plane normal (0.0, 0.0, 1.0). Rotation of the object can be done simply by rotating the plane normal.
And this is preview:
As you can see, the hemisphere works by just cutting the sphere with a plane ... The only code from the above that is important for you is this (see the *** comments):
if (id==_fac_hemispheres) // *** ignore
for (;num>0;num--) // *** ignore
{
float r;
// *** here v0 is center, v1 is plane normal and r is radius
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get; r=fac_get;
v1.x=fac_get; v1.y=fac_get; v1.z=fac_get;
// *** this is ray/ellipsoid intersection returning l0,l1 ray distances for both hits
// compute l0 length of ray(p0,dp) to intersection with sphere(v0,r)
// where rr= r^-2
double aa,bb,cc,dd,l0,l1,rr;
dvec3 p0,dp;
p0=ray[i0].pos-v0; // set sphere center to (0,0,0)
dp=ray[i0].dir;
rr = 1.0/(r*r);
aa=2.0*rr*dot(dp,dp);
bb=2.0*rr*dot(p0,dp);
cc= rr*dot(p0,p0)-1.0;
dd=((bb*bb)-(2.0*aa*cc));
if (dd<0.0) continue;
dd=sqrt(dd);
l0=(-bb+dd)/aa;
l1=(-bb-dd)/aa;
// *** this throws away hits on the wrong side of the plane
// test both hits-v0 against normal v1
v2=ray[i0].pos+(ray[i0].dir*l0)-v0; if (dot(v1,v2)>0.0) l0=-1.0;
v2=ray[i0].pos+(ray[i0].dir*l1)-v0; if (dot(v1,v2)>0.0) l1=-1.0;
// *** this is just using closer valid hit
if (l0<0.0) l0=l1;
if (l1<0.0) l1=l0;
t=min(l0,l1); if (t<=_zero) t=max(l0,l1);
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// position,normal
pos=ray[i0].pos+(ray[i0].dir*t);
ray[i0].nor=pos-v0;
}
}
I used my ray and ellipsoid intersection accuracy improvement as it returns both hits, not just the first one.
If you cross check the spheres and hemispheres you will see I just added these two lines:
v2=ray[i0].pos+(ray[i0].dir*l0)-v0; if (dot(v1,v2)>0.0) l0=-1.0;
v2=ray[i0].pos+(ray[i0].dir*l1)-v0; if (dot(v1,v2)>0.0) l1=-1.0;
which just converts the ray distances to hit positions and tests the condition mentioned above...

Translating a single object in OpenGL

I am writing code to draw a figure made of a central circle with six circles around it, but in my output the middle circle is missing.
My code:
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
#include <math.h>
float width, height, r = 0.3, change = 0;
void draw(float tx, float ty)
{
glBegin(GL_LINE_LOOP);
for(int i = 1; i <= 1200; i++)
{
float x1, y1, theta;
theta = (2 * 3.14159 * i) / 1200;
x1 = r * cosf(theta) * height / width;
y1 = r * sinf(theta);
glVertex3f(x1 , y1 ,0);
}
glEnd();
glTranslatef(tx, ty, 0);
}
void display()
{
float p[6][2];
int j = 0;
if (change == 0)
change = 1;
else if (change == 1)
change = 0;
width = glutGet(GLUT_WINDOW_WIDTH);
height = glutGet(GLUT_WINDOW_HEIGHT) ;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor3f(1.0f, 0.0f, 0.0f);
glMatrixMode(GL_MODELVIEW);
glBegin(GL_LINE_LOOP);
for(int i = 1; i <= 1200; i++)
{
float theta, x1, y1;
theta = (2 * 3.14159 * i) / 1200;
x1 = r * cosf(theta) * height / width;
y1 = r * sinf(theta);
glVertex3f(x1, y1, 0);
if (i == 100 | i == 300 | i == 500 | i == 700 | i == 900 | i == 1100)
{
if(change == 0){
p[j][0] = x1;
p[j][1] = y1;
j++;
}
}
}
glEnd();
for(int i=0;i<6 && change == 0;i++){
draw(p[i][0],p[i][1]);
}
glutSwapBuffers();
}
void main(int argc,char *argv[])
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(700,500);
glutCreateWindow("circles");
glutDisplayFunc(display);
glutMainLoop();
}
The issue is that when I translate the first circle in the draw function, the center circle is also translated to that point and merges with the other circle. My question is how to translate only one circle and not the center one; I tried translating using push and pop matrix but it doesn't work.
Thank you.
glTranslatef() changes the current matrix by appending a translation. So your translations will just accumulate. And since you do not have a transform between the first two circles, they will appear at the same positions. Your program basically does the following:
Draw Circle
draw()
Draw Circle
Move up, right (p[0])
draw()
Draw Circle
Move up (p[1])
draw()
Draw Circle
Move up left (p[2])
...
If you want absolute positioning, you have to reset the transform in between. This can either be done with glLoadIdentity() or with the matrix stack. And be sure to draw after setting the transform.
I guess you know, but in any case a little reminder: the entire matrix stack functionality is deprecated in modern OpenGL and you will need to manage the matrices yourself. I assume that when you do this, everything will become a bit clearer, so I'm not sure there is a good reason to spend much effort on the matrix stack interface.
If you want to place each circle at a specific location, you can do something like the following:
void drawCircle()
{
glBegin(GL_LINE_LOOP);
for(int i = 1; i <= 1200; i++)
{
float x1, y1, theta;
theta = (2 * 3.14159 * i) / 1200;
x1 = r * cosf(theta) * height / width;
y1 = r * sinf(theta);
glVertex3f(x1 , y1 ,0);
}
glEnd();
}
void display()
{
// ...
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor3f(1.0f, 0.0f, 0.0f);
glMatrixMode(GL_MODELVIEW);
drawCircle();
for(int i = 0; i < 6; ++i)
{
float angle = M_PI / 3 * i;
float tx = r * sin(angle);
float ty = r * cos(angle);
glPushMatrix(); //save the current matrix
glTranslatef(tx, ty, 0); //move to the desired location
drawCircle();
glPopMatrix(); //restore the old matrix
}
glutSwapBuffers();
}
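For completeness, a sketch of the glLoadIdentity() variant mentioned above (absolute positioning instead of push/pop; drawCircle() is the same helper as in the snippet above):
drawCircle(); /* center circle, identity transform */
for(int i = 0; i < 6; ++i)
{
    float angle = M_PI / 3 * i;
    glLoadIdentity(); /* reset GL_MODELVIEW so the translation is absolute */
    glTranslatef(r * sin(angle), r * cos(angle), 0);
    drawCircle();
}
glLoadIdentity(); /* leave a clean matrix behind for the next frame */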

3D identity matrix to correctly set vertices

I'm playing around with matrices, with a view to doing 3D transformations in GDI (for the fun of it). At the moment I'm checking that I'm getting the right values from an identity matrix, given a representation of four vertices arranged in a square. I've been scratching my head as to why it's not giving the expected output. I have done my research but can't see what I am doing wrong here.
Here's my definition of matrix.
typedef struct m{
float _m01, _m05, _m09, _m13;
float _m02, _m06, _m10, _m14;
float _m03, _m07, _m11, _m15;
float _m04, _m08, _m12, _m16;
}mat;
struct m matIdentity(struct m *m1){
m1->_m01 = 1.0; m1->_m05 = 0.0; m1->_m09 = 0.0; m1->_m13 = 0.0;
m1->_m02 = 0.0; m1->_m06 = 1.0; m1->_m10 = 0.0; m1->_m14 = 0.0;
m1->_m03 = 0.0; m1->_m07 = 0.0; m1->_m11 = 1.0; m1->_m15 = 0.0;
m1->_m04 = 0.0; m1->_m08 = 0.0; m1->_m12 = 0.0; m1->_m16 = 1.0;
}
Here's making use of matrix with
struct m matrix;
matIdentity(&matrix);
//represent 4 vertices(x,y,z,w);
float square[4][4] = {
{0.0, 0.0, 0.0, 1.0},
{0.0, 20.0, 0.0, 1.0},
{20.0, 20.0, 0.0, 1.0},
{20.0, 0.0, 0.0, 1.0}
};
float result[4][4];
int i = 0;
for(i = 0; i < 4; i++){
result[i][1] = (matrix._m01 * square[i][0]) + (matrix._m05 * square[i][1]) + (matrix._m09 * square[i][2]) + (matrix._m13 * square[i][3]);
result[i][2] = (matrix._m02 * square[i][0]) + (matrix._m06 * square[i][1]) + (matrix._m10 * square[i][2]) + (matrix._m14 * square[i][3]);
result[i][3] = (matrix._m03 * square[i][0]) + (matrix._m07 * square[i][1]) + (matrix._m11 * square[i][2]) + (matrix._m15 * square[i][3]);
result[i][4] = (matrix._m04 * square[i][0]) + (matrix._m08 * square[i][1]) + (matrix._m12 * square[i][2]) + (matrix._m16 * square[i][3]);
}
char strOutput[500];
sprintf(strOutput,"%f %f %f %f\n %f %f %f %f\n %f %f %f %f\n %f %f %f %f\n ",
result[0][0], result[0][1], result[0][2], result[0][3],
result[1][0], result[1][1], result[1][2], result[1][3],
result[2][0], result[2][1], result[2][2], result[2][3],
result[3][0], result[3][1], result[3][2], result[3][3]
);
I have a feeling the problem has something to do with multiplying a row-based representation of the vertices by a column-major matrix. Can anyone please suggest how I should be doing this?
I don't understand why you use named struct fields for the matrix, an array for the vertices, and then write the multiplication out by hand instead of iterating. Such a program can only cause confusion.
The correct formula is C(i, j) = sigma(A(i, k) * B(k, j), k = 1..n), where C = AB and n is 4 in your case.
(E.g., this line should look like: result[i][0] = (matrix._m01 * square[0][i]) + (matrix._m02 * square[1][i]) + (matrix._m03 * square[2][i]) + (matrix._m04 * square[3][i]); ) Write a simple nested for loop to calculate this...
This is not for one vector, but for n vectors....
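As a sketch of that formula written out as a plain triple loop (assuming float 4x4 arrays and 0-based indices):
float A[4][4], B[4][4], C[4][4];
// todo: initialize A and B
/* C = A * B, i.e. C(i,j) = sum over k of A(i,k) * B(k,j) */
for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j)
    {
        C[i][j] = 0.0f;
        for (int k = 0; k < 4; ++k)
            C[i][j] += A[i][k] * B[k][j];
    }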
This is not matrix multiplication. Multiplying a vector by a matrix goes like this:
float mat[4][4];
float vec_in[4];
float vec_out[4];
// todo: initialize values
for (int j = 0; j < 4; ++j)
{
vec_out[j] = 0.0f;
for (int i = 0; i < 4; ++i)
{
vec_out[j] += vec_in[i] * mat[i][j];
}
}
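Applied to the four vertices from the question, the same loop simply runs once per vertex. A sketch (assuming the matrix is stored as a plain float mat[4][4] like in the snippet above, with the vertices as row vectors):
float mat[4][4];    /* identity or any other transform */
float square[4][4]; /* the four input vertices from the question */
float result[4][4];
// todo: initialize mat and square
for (int v = 0; v < 4; ++v)         /* one pass per vertex */
    for (int j = 0; j < 4; ++j)     /* output component */
    {
        result[v][j] = 0.0f;
        for (int i = 0; i < 4; ++i) /* accumulate over input components */
            result[v][j] += square[v][i] * mat[i][j];
    }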

Why is my ray-traced image entirely black?

I had to generate an image that's a black circle, black being (0, 0, 0) and white being (1, 1, 1), but I keep getting a completely black image. Here's all my code:
#include "cast.h"
#include "collisions.h"
#include <stdio.h>
#include "math.h"
int cast_ray(struct ray r, struct sphere spheres[], int num_spheres)
{
int isFound;
struct maybe_point mp;
isFound = 0;
for (int i = 0; i < num_spheres; i++)
{
mp = sphere_intersection_point(r, spheres[i]);
if (mp.isPoint == 1)
{
isFound = 1;
}
else
{
isFound = 0;
}
}
return isFound;
}
void print_pixel(double a, double b, double c)
{
int i, j, k;
i = a * 255;
j = b * 255;
k = c * 255;
printf("%d %d %d ", i, j, k);
}
void cast_all_rays(double min_x, double max_x, double min_y, double max_y,
int width, int height, struct point eye,
struct sphere spheres[], int num_spheres)
{
double width_interval, height_interval, y, x;
int intersect;
width_interval = (max_x - min_x)/width;
height_interval = (max_y - min_y)/height;
for (y = max_y; y > min_y; y = y - height_interval)
{
for (x = min_x; x < max_x; x = x + width_interval)
{
struct ray r;
r.p = eye;
r.dir.x = x;
r.dir.y = y;
r.dir.z = 0.0;
intersect = cast_ray(r, spheres, num_spheres);
if (intersect != 0)
{
print_pixel (0, 0, 0);
}
else
{
print_pixel (1, 1, 1);
}
}
}
}
I already have functions, which I know are correct, that determine whether or not the ray intersects a sphere. The function I use to find intersection points is the one called inside cast_ray:
sphere_intersection_point(r, spheres[i]);
The print_pixel function converts the color values to integers by multiplying them by the max color value, which is 255.
And the cast_all_rays function casts rays into the whole scene from the eye (going through all the x coordinates before changing the y). If a ray intersects a sphere, the pixel is black, thus forming a black circle in the end.
And here are the limits for the x, y, and radius (NOTE: I'M USING THE PPM FORMAT):
Eye at <0.0, 0.0, -14.0>.
A sphere at <1.0, 1.0, 0.0> with radius 2.0.
A sphere at <0.5, 1.5, -3.0> with radius 0.5.
min_x at -10, max_x at 10, min_y of -7.5, max_y at 7.5, width=1024, and height=768.
I need to generate an image of a black circle, but I keep getting an image that's completely black. I have a feeling that the problem lies inside the cast_all_rays function, but I just can't seem to find what it is. Help is appreciated! Thanks.
And just in case something went wrong with my testing, here's my test.c file for cast_all_rays:
#include "collisions.h"
#include "data.h"
#include "cast.h"
#include <stdio.h>
void cast_all_rays_tests(void)
{
printf("P3\n");
printf("%d %d\n", 1024, 768);
printf("255\n");
double min_x, max_x, min_y, max_y;
int width, height;
struct point eye;
struct sphere spheres[2];
eye.x = 0.0;
eye.y = 0.0;
eye.z = -14.0;
spheres[0].center.x = 1.0;
spheres[0].center.y = 1.0;
spheres[0].center.z = 0.0;
spheres[0].radius = 2.0;
spheres[1].center.x = 0.5;
spheres[1].center.y = 1.5;
spheres[1].center.z = -3.0;
spheres[1].radius = 0.5;
min_x = -10;
max_x = 10;
min_y = -7.5;
max_y = 7.5;
cast_all_rays(min_x, max_x, min_y, max_y, width, height, eye, spheres, num_spheres);
}
int main()
{
cast_all_rays_tests();
return 0;
}
Not sure if this is the problem you're having, but you should only set isFound if you intersect a sphere. Don't set it if you don't intersect. Otherwise your image will be governed by only the last sphere in the list.
if (mp.isPoint == 1)
{
isFound = 1;
}
//else
//{
// isFound = 0;
//}
Since your image is entirely black, it seems like your intersection code is bung or your field of view is too narrow. If you don't have any joy with the above change, maybe you should post details on your x- and y-limits, the eye position, and the position and radius of the sphere.
One more thing I noticed is r.dir.z = 0.0. Do you subtract the eye position from this to get a direction, or is that your true ray direction? Surely you need to give a non-zero z-direction. Normally you set the x and the y based on your view plane and provide a constant z such as 1 or -1.
[edit]
To make it clearer from the comments below, I believe that you haven't correctly set up your ray direction. Instead you have simply set the direction to be the view-plane's pixel position, ignoring the eye position. The following would be more usual:
struct ray r;
r.p = eye;
r.dir.x = x - eye.x;
r.dir.y = y - eye.y;
r.dir.z = 0.0 - eye.z;
