I've been trying to add a post-processing effect (taken from a Sebastian Lague video, which I am trying to convert from Unity to three.js): when a ray hits the ocean on my mesh (the blue part), it should be colored white (just like in his video), and everywhere else the original color should be returned.
But for the life of me I can't seem to figure out the problem. I assume my ray origin or direction might be wrong, but nothing seems to work. Here's the code that I pass to the ray-sphere intersection function, and the function itself.
vec2 raySphere(vec3 centre, float radius, vec3 rayOrigin, vec3 rayDir) {
    vec3 offset = rayOrigin - centre;
    float a = 1.0; // set to dot(rayDir, rayDir) instead if rayDir may not be normalized
    float b = 2.0 * dot(offset, rayDir);
    float c = dot(offset, offset) - radius * radius;
    float discriminant = b * b - 4.0 * a * c;
    // No intersection:  discriminant < 0
    // 1 intersection:   discriminant == 0
    // 2 intersections:  discriminant > 0
    if (discriminant > 0.0) {
        float s = sqrt(discriminant);
        float dstToSphereNear = max(0.0, (-b - s) / (2.0 * a));
        float dstToSphereFar = (-b + s) / (2.0 * a);
        if (dstToSphereFar >= 0.0) {
            return vec2(dstToSphereNear, dstToSphereFar - dstToSphereNear);
        }
    }
    return vec2(99999999.0, 0.0);
}
vec4 ro = inverse(modelMatrix) * vec4(cameraPosition, 1.0);
vec3 rd = normalize(position - ro.xyz);
vec3 oceanCentre = vec3(0.0, 0.0, 0.0);
float oceanRadius = 32.0;
vec2 hitInfo = raySphere(oceanCentre, oceanRadius, ro.xyz, rd);
float dstToOcean = hitInfo.x;
float dstThroughOcean = hitInfo.y;
vec3 rayOceanIntersectPos = ro.xyz + rd * dstToOcean - oceanCentre;
// dst that view ray travels through ocean (before hitting terrain / exiting ocean)
float oceanViewDepth = min(dstThroughOcean, depth - dstToOcean);
vec4 oceanCol;
float alpha;
if (oceanViewDepth > 0.0) {
    gl_FragColor = vec4(vec3(1.0), 0.1);
}
gl_FragColor = texture2D(tDiffuse, vUv);
Can someone help point out where I might be messing up?
Oh wow, we're in the same place, stuck on making these shaders. I checked your ray intersector and it only has small problems. Here are the cases:
What we want is case 3, which is what happens in your example, so the intersections are found; the problem most likely comes from missing depth correction. To fix it:
Make sure the maximum depth used by your sphere intersection matches the camera's scene depth.
I also suspect the last line is the problem; try this:
vec3 col; // Declare the color
vec2 o = sphere(ro, rd, vec3(0), 1.0); // Ocean depth
float oceanViewDepth = min(o.y - o.x, t - o.x);
if (depth > 0.0 && tmax > depth) {
    col = originalCol;
}
if (oceanViewDepth > 0.0) {
    col = vec3(1);
}
gl_FragColor = vec4(col, 1.0);
If that doesn't work for you, I have a finished example you can check out on Shadertoy.
I am currently working on a basic ray tracing program in C. I have managed to do some simple shapes (sphere, box, plane, cone, ...), and I have also shaded them using Phong illumination.
But my question is: how can I ray trace a hemisphere? Is there a set equation that defines the hemisphere? If so, please enlighten me, because I couldn't find any. Or is there a set method to do it that I couldn't figure out?
I have also tried to cut the sphere with a plane and only show the top half, but it didn't work (I am still new to all this, so my understanding may be wrong).
Edit: OK, I am sorry because I am really new to all this, but here is what I have tried.
#include "raytacing.h"
t_env *init_sphere(t_env *e)
{
    // sphere position and radius
    e->sph.posi.x = 0;
    e->sph.posi.y = 0;
    e->sph.posi.z = -1;
    e->sph.rad = 0.5; // must be non-zero, otherwise the sphere is never hit
    e->sph.color = (t_color){255, 255, 128};
    return (e);
}
t_env *init_plane(t_env *e)
{
    // plane position
    e->plane.posi.x = 0;
    e->plane.posi.y = -0.5;
    e->plane.posi.z = 0;
    // plane normal
    e->plane.normal.x = 0;
    e->plane.normal.y = 1;
    e->plane.normal.z = 0;
    e->plane.color = (t_color){0, 255, 0};
    return (e);
}
double inter_plane(t_env *e, double *t) // calculating plane intersection
{
    t_vect dist;
    double norm;

    norm = dot(e->plane.normal, e->r.direction);
    if (fabs(norm) > 1e-6)
    {
        dist = vect_sub(e->plane.posi, e->r.start);
        e->t0 = dot(dist, e->plane.normal) / norm;
        if (e->t0 < *t && e->t0 > 1e-6)
        {
            *t = e->t0;
            return (1);
        }
        else
            return (0);
    }
    return (0);
}
double inter_sph(t_env *e, double *t) // calculating sphere intersection
{
    double delta;
    double sqrtd;
    t_vect dist;

    e->a = dot(e->r.direction, e->r.direction);
    dist = vect_sub(e->r.start, e->sph.posi);
    e->b = 2 * dot(dist, e->r.direction);
    e->c = dot(dist, dist) - e->sph.rad * e->sph.rad;
    delta = e->b * e->b - 4 * e->a * e->c;
    if (delta < 0)
        return (0);
    sqrtd = sqrt(delta);
    e->t0 = (-e->b + sqrtd) / (2 * e->a);
    e->t1 = (-e->b - sqrtd) / (2 * e->a);
    if (e->t0 > e->t1)
        e->t0 = e->t1;
    if ((e->t0 > 1e-6) && (e->t0 < *t))
    {
        *t = e->t0;
        return (1);
    }
    else
        return (0);
}
double inter_hemisphere(t_env *e) // calculating hemisphere intersection
{
    t_vect hit_normal;

    if (inter_sph(e, &e->t) == 1)
    {
        hit_normal = vect_add(e->r.start, vect_scalaire(e->t, e->r.direction));
        hit_normal = vect_normalize(hit_normal);
        if (inter_plane(e, &(e->t)) == 1)
        {
            if (dot(e->plane.normal, hit_normal) < 0)
                return (1);
            return (0);
        }
    }
    return (0);
}
the e->t is . supposed to be the closest distance to the camera so that i get an exact display of close and far objects
And here i tried to apply what Spektre said and got some thing displayed and look like something like this:
And when i try to rotate it i get this:
Edit2 : After using Spektre Method I got a functional Intersection of a Hemisphere and the intersection look something like this.
double inter_hemisphere(t_env *e, double *t)
{
    double delta;
    double sqrtd;
    t_vect dist;

    e->a = dot(e->r.direction, e->r.direction);
    dist = vect_sub(e->r.start, e->sph.posi);
    e->b = 2 * dot(dist, e->r.direction);
    e->c = dot(dist, dist) - e->sph.rad * e->sph.rad;
    delta = e->b * e->b - 4 * e->a * e->c;
    if (delta < 0)
        return (0);
    sqrtd = sqrt(delta);
    e->t0 = (-e->b + sqrtd) / (2 * e->a);
    e->t1 = (-e->b - sqrtd) / (2 * e->a);
    t_vect v2;
    v2 = vect_add(e->r.start, vect_sub(vect_scalaire(e->t0, e->r.direction), e->sph.posi));
    if (dot(e->plane.normal, v2) > 0.0)
        e->t0 = -1.0;
    v2 = vect_add(e->r.start, vect_sub(vect_scalaire(e->t1, e->r.direction), e->sph.posi));
    if (dot(e->plane.normal, v2) > 0.0)
        e->t1 = -1.0;
    if (e->t0 < 0.0)
        e->t0 = e->t1;
    if (e->t1 < 0.0)
        e->t1 = e->t0;
    double tt;
    tt = fmin(e->t0, e->t1);
    if (tt <= 0.0)
        tt = fmax(e->t0, e->t1);
    if (tt > 1e-6 && tt < e->t)
    {
        *t = tt;
        return (1);
    }
    return (0);
}
And here is the Result:
The simplest way is to cut your sphere by a plane.
If you have the plane normal, then any point on the sphere whose direction (point on sphere - sphere center) points the same way as the normal is cut off, simply by this condition:
dot(point on sphere - sphere center, plane normal) > 0.0
But do not forget to test both intersections of the ray and sphere, as the closer one can be on the other side of the plane ...
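For reference, here is that same test written as a minimal sketch with the helpers from the question's C code (vect_add, vect_scalaire, vect_sub and dot are assumed to behave as in the question; the function name is hypothetical):
/* Sketch of the cut test, reusing the question's t_vect helpers.
   Returns 1 if the hit at ray distance t lies on the kept side of the plane. */
int hemisphere_keep_hit(t_env *e, double t)
{
    t_vect hit;

    /* hit position = ray start + t * ray direction */
    hit = vect_add(e->r.start, vect_scalaire(t, e->r.direction));
    /* (hit - sphere center) . plane normal > 0  =>  cut away */
    return (dot(vect_sub(hit, e->sph.posi), e->plane.normal) <= 0.0);
}
Call it for both ray/sphere hits and keep only the distances for which it returns 1.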
I tried to implement this in my GLSL ray tracer:
Reflection and refraction impossible without recursive ray tracing?
and came up with this updated fragment shader:
Vertex (no change):
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform float aspect;
uniform float focal_length;
uniform mat4x4 tm_eye;
layout(location=0) in vec2 pos;
out smooth vec2 txt_pos; // frag position on screen <-1,+1> for debug prints
out smooth vec3 ray_pos; // ray start position
out smooth vec3 ray_dir; // ray start direction
//------------------------------------------------------------------
void main(void)
{
vec4 p;
txt_pos=pos;
// perspective projection
p=tm_eye*vec4(pos.x/aspect,pos.y,0.0,1.0);
ray_pos=p.xyz;
p-=tm_eye*vec4(0.0,0.0,-focal_length,1.0);
ray_dir=normalize(p.xyz);
gl_Position=vec4(pos,0.0,1.0);
}
//------------------------------------------------------------------
Fragment (added hemispheres):
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
// Ray tracer ver: 1.000
//------------------------------------------------------------------
in smooth vec3 ray_pos; // ray start position
in smooth vec3 ray_dir; // ray start direction
uniform float n0; // refractive index of camera origin
uniform int fac_siz; // square texture x,y resolution size
uniform int fac_num; // number of valid floats in texture
uniform sampler2D fac_txr; // scene mesh data texture
out layout(location=0) vec4 frag_col;
//---------------------------------------------------------------------------
#define _reflect
#define _refract
//---------------------------------------------------------------------------
void main(void)
{
const vec3 light_dir=normalize(vec3(0.1,0.1,1.0));
const float light_iamb=0.1; // dot offset
const float light_idir=0.5; // directional light amplitude
const vec3 back_col=vec3(0.2,0.2,0.2); // background color
const float _zero=1e-6; // to avoid intersection with start point of ray
const int _fac_triangles =0; // r,g,b,a, n, triangle count, { x0,y0,z0,x1,y1,z1,x2,y2,z2 }
const int _fac_spheres =1; // r,g,b,a, n, sphere count, { x,y,z,r }
const int _fac_hemispheres=2; // r,g,b,a, n, hemisphere count,{ x,y,z,r,nx,ny,nz }
// ray scene intersection
struct _ray
{
dvec3 pos,dir,nor;
vec3 col;
float refl,refr;// reflection,refraction intensity coefficients
float n0,n1; // refraction index (start,end)
double l; // ray length
int lvl,i0,i1; // recursion level, reflect, refract
};
const int _lvls=4;
const int _rays=(1<<_lvls)-1;
_ray ray[_rays]; int rays;
dvec3 v0,v1,v2,pos;
vec3 c;
float refr,refl,n1;
double tt,t,a;
int i0,ii,num,id;
// fac texture access
vec2 st; int i,j; float ds=1.0/float(fac_siz-1);
#define fac_get texture(fac_txr,st).r; st.s+=ds; i++; j++; if (j==fac_siz) { j=0; st.s=0.0; st.t+=ds; }
// enque start ray
ray[0].pos=ray_pos;
ray[0].dir=normalize(ray_dir);
ray[0].nor=vec3(0.0,0.0,0.0);
ray[0].refl=0.0;
ray[0].refr=0.0;
ray[0].n0=n0;
ray[0].n1=1.0;
ray[0].l =0.0;
ray[0].lvl=0;
ray[0].i0=-1;
ray[0].i1=-1;
rays=1;
// loop all enqued rays
for (i0=0;i0<rays;i0++)
{
// loop through all objects
// find closest forward intersection between them and ray[i0]
// store it to ray[i0].(nor,col)
// store it to pos,n1
t=tt=-1.0; ii=1; ray[i0].l=0.0;
ray[i0].col=back_col;
pos=ray[i0].pos; n1=n0;
for (st=vec2(0.0,0.0),i=j=0;i<fac_num;)
{
c.r=fac_get; // RGBA
c.g=fac_get;
c.b=fac_get;
refl=fac_get;
refr=fac_get;
n1=fac_get; // refraction index
a=fac_get; id=int(a); // object type
a=fac_get; num=int(a); // face count
if (id==_fac_triangles)
for (;num>0;num--)
{
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get;
v1.x=fac_get; v1.y=fac_get; v1.z=fac_get;
v2.x=fac_get; v2.y=fac_get; v2.z=fac_get;
dvec3 e1,e2,n,p,q,r;
double t,u,v,det,idet;
//compute ray triangle intersection
e1=v1-v0;
e2=v2-v0;
// Calculate planes normal vector
p=cross(ray[i0].dir,e2);
det=dot(e1,p);
// Ray is parallel to plane
if (abs(det)<1e-8) continue;
idet=1.0/det;
r=ray[i0].pos-v0;
u=dot(r,p)*idet;
if ((u<0.0)||(u>1.0)) continue;
q=cross(r,e1);
v=dot(ray[i0].dir,q)*idet;
if ((v<0.0)||(u+v>1.0)) continue;
t=dot(e2,q)*idet;
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// barycentric interpolate position
t=1.0-u-v;
pos=(v0*t)+(v1*u)+(v2*v);
// compute normal (store as dir for now)
e1=v1-v0;
e2=v2-v1;
ray[i0].nor=cross(e1,e2);
}
}
if (id==_fac_spheres)
for (;num>0;num--)
{
float r;
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get; r=fac_get;
// compute l0 length of ray(p0,dp) to intersection with sphere(v0,r)
// where rr= r^-2
double aa,bb,cc,dd,l0,l1,rr;
dvec3 p0,dp;
p0=ray[i0].pos-v0; // set sphere center to (0,0,0)
dp=ray[i0].dir;
rr = 1.0/(r*r);
aa=2.0*rr*dot(dp,dp);
bb=2.0*rr*dot(p0,dp);
cc= rr*dot(p0,p0)-1.0;
dd=((bb*bb)-(2.0*aa*cc));
if (dd<0.0) continue;
dd=sqrt(dd);
l0=(-bb+dd)/aa;
l1=(-bb-dd)/aa;
if (l0<0.0) l0=l1;
if (l1<0.0) l1=l0;
t=min(l0,l1); if (t<=_zero) t=max(l0,l1);
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// position,normal
pos=ray[i0].pos+(ray[i0].dir*t);
ray[i0].nor=pos-v0;
}
}
if (id==_fac_hemispheres)
for (;num>0;num--)
{
float r;
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get; r=fac_get;
v1.x=fac_get; v1.y=fac_get; v1.z=fac_get;
// compute l0 length of ray(p0,dp) to intersection with sphere(v0,r)
// where rr= r^-2
double aa,bb,cc,dd,l0,l1,rr;
dvec3 p0,dp;
p0=ray[i0].pos-v0; // set sphere center to (0,0,0)
dp=ray[i0].dir;
rr = 1.0/(r*r);
aa=2.0*rr*dot(dp,dp);
bb=2.0*rr*dot(p0,dp);
cc= rr*dot(p0,p0)-1.0;
dd=((bb*bb)-(2.0*aa*cc));
if (dd<0.0) continue;
dd=sqrt(dd);
l0=(-bb+dd)/aa;
l1=(-bb-dd)/aa;
// test both hits-v0 against normal v1
v2=ray[i0].pos+(ray[i0].dir*l0)-v0; if (dot(v1,v2)>0.0) l0=-1.0;
v2=ray[i0].pos+(ray[i0].dir*l1)-v0; if (dot(v1,v2)>0.0) l1=-1.0;
if (l0<0.0) l0=l1;
if (l1<0.0) l1=l0;
t=min(l0,l1); if (t<=_zero) t=max(l0,l1);
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// position,normal
pos=ray[i0].pos+(ray[i0].dir*t);
ray[i0].nor=pos-v0;
}
}
}
ray[i0].l=tt;
ray[i0].nor=normalize(ray[i0].nor);
// split ray from pos and ray[i0].nor
if ((ii==0)&&(ray[i0].lvl<_lvls-1))
{
t=dot(ray[i0].dir,ray[i0].nor);
// reflect
#ifdef _reflect
if ((ray[i0].refl>_zero)&&(t<_zero)) // do not reflect inside objects
{
ray[i0].i0=rays;
ray[rays]=ray[i0];
ray[rays].lvl++;
ray[rays].i0=-1;
ray[rays].i1=-1;
ray[rays].pos=pos;
ray[rays].dir=ray[rays].dir-(2.0*t*ray[rays].nor);
ray[rays].n0=ray[i0].n0;
ray[rays].n1=ray[i0].n0;
rays++;
}
#endif
// refract
#ifdef _refract
if (ray[i0].refr>_zero)
{
ray[i0].i1=rays;
ray[rays]=ray[i0];
ray[rays].lvl++;
ray[rays].i0=-1;
ray[rays].i1=-1;
ray[rays].pos=pos;
t=dot(ray[i0].dir,ray[i0].nor);
if (t>0.0) // exit object
{
ray[rays].n0=ray[i0].n0;
ray[rays].n1=n0;
if (i0==0) ray[i0].n1=n1;
v0=-ray[i0].nor; t=-t;
}
else{ // enter object
ray[rays].n0=n1;
ray[rays].n1=ray[i0].n0;
ray[i0 ].n1=n1;
v0=ray[i0].nor;
}
n1=ray[i0].n0/ray[i0].n1;
tt=1.0-(n1*n1*(1.0-t*t));
if (tt>=0.0)
{
ray[rays].dir=(ray[i0].dir*n1)-(v0*((n1*t)+sqrt(tt)));
rays++;
}
}
#endif
}
else if (i0>0) // ignore last ray if nothing hit
{
ray[i0]=ray[rays-1];
rays--; i0--;
}
}
// back track ray intersections and compute output color col
// lvl is sorted ascending so backtrack from end
for (i0=rays-1;i0>=0;i0--)
{
// directional + ambient light
t=abs(dot(ray[i0].nor,light_dir)*light_idir)+light_iamb;
t*=1.0-ray[i0].refl-ray[i0].refr;
ray[i0].col.rgb*=float(t);
// reflect
ii=ray[i0].i0;
if (ii>=0) ray[i0].col.rgb+=ray[ii].col.rgb*ray[i0].refl;
// refract
ii=ray[i0].i1;
if (ii>=0) ray[i0].col.rgb+=ray[ii].col.rgb*ray[i0].refr;
}
frag_col=vec4(ray[0].col,1.0);
}
//---------------------------------------------------------------------------
The vertex shader just creates the ray position and direction, which are interpolated by the GPU, and then the fragment shader handles each ray (per pixel).
I use this scene:
// init mesh raytracer
ray.gl_init();
ray.beg();
// r g b rfl rfr n
ray.add_material(1.0,0.7,0.1,0.3,0.0,_n_glass); ray.add_hemisphere( 0.0, 0.0, 2.0,0.5, 0.0, 0.0, 1.0);
ray.add_material(1.0,1.0,1.0,0.3,0.0,_n_glass); ray.add_box ( 0.0, 0.0, 6.0,9.0,9.0,0.1);
ray.add_material(1.0,1.0,1.0,0.1,0.8,_n_glass); ray.add_sphere ( 0.0, 0.0, 0.5,0.5);
ray.add_material(1.0,0.1,0.1,0.3,0.0,_n_glass); ray.add_sphere (+2.0, 0.0, 2.0,0.5);
ray.add_material(0.1,1.0,0.1,0.3,0.0,_n_glass); ray.add_box (-2.0, 0.0, 2.0,0.5,0.5,0.5);
ray.add_material(0.1,0.1,1.0,0.3,0.0,_n_glass);
ray.add_tetrahedron
(
0.0, 0.0, 3.0,
-1.0,-1.0, 4.0,
+1.0,-1.0, 4.0,
0.0,+1.0, 4.0
);
ray.end();
containing a single yellow hemisphere at (0.0, 0.0, 2.0) with radius r=0.5 and plane normal (0.0, 0.0, 1.0). Rotation of the object can be done simply by rotating the plane normal.
And this is the preview:
As you can see, the hemisphere works by just cutting a sphere with a plane ... The only important code from the above for you is this (see the *** comments):
if (id==_fac_hemispheres) // *** ignore
for (;num>0;num--) // *** ignore
{
float r;
// *** here v0 is center, v1 is plane normal and r is radius
v0.x=fac_get; v0.y=fac_get; v0.z=fac_get; r=fac_get;
v1.x=fac_get; v1.y=fac_get; v1.z=fac_get;
// *** this is ray/ellipsoid intersection returning l0,l1 ray distances for both hits
// compute l0 length of ray(p0,dp) to intersection with sphere(v0,r)
// where rr= r^-2
double aa,bb,cc,dd,l0,l1,rr;
dvec3 p0,dp;
p0=ray[i0].pos-v0; // set sphere center to (0,0,0)
dp=ray[i0].dir;
rr = 1.0/(r*r);
aa=2.0*rr*dot(dp,dp);
bb=2.0*rr*dot(p0,dp);
cc= rr*dot(p0,p0)-1.0;
dd=((bb*bb)-(2.0*aa*cc));
if (dd<0.0) continue;
dd=sqrt(dd);
l0=(-bb+dd)/aa;
l1=(-bb-dd)/aa;
// *** this throws away hits on the wrong side of the plane
// test both hits-v0 against normal v1
v2=ray[i0].pos+(ray[i0].dir*l0)-v0; if (dot(v1,v2)>0.0) l0=-1.0;
v2=ray[i0].pos+(ray[i0].dir*l1)-v0; if (dot(v1,v2)>0.0) l1=-1.0;
// *** this is just using closer valid hit
if (l0<0.0) l0=l1;
if (l1<0.0) l1=l0;
t=min(l0,l1); if (t<=_zero) t=max(l0,l1);
if ((t>_zero)&&((t<=tt)||(ii!=0)))
{
ii=0; tt=t;
// store color,n ...
ray[i0].col=c;
ray[i0].refl=refl;
ray[i0].refr=refr;
// position,normal
pos=ray[i0].pos+(ray[i0].dir*t);
ray[i0].nor=pos-v0;
}
}
I used my ray and ellipsoid intersection accuracy improvement, as it returns both hits, not just the first one.
If you cross-check the spheres and hemispheres, you will see I just added these two lines:
v2=ray[i0].pos+(ray[i0].dir*l0)-v0; if (dot(v1,v2)>0.0) l0=-1.0;
v2=ray[i0].pos+(ray[i0].dir*l1)-v0; if (dot(v1,v2)>0.0) l1=-1.0;
which just convert the ray distances to hit positions and compute the condition mentioned above...
I'm working on an OpenGL project and I need a brief explanation of its core components, as I need to explain them to somebody.
The following is part of the program.
Below are the global variables and header files used in the program:
#include<GL/glut.h>
#include<math.h>
#include<stdbool.h>
#define PI 3.14159265f
#include<stdio.h>
GLfloat ballRadius = 0.2,xradius=0.2,xxradius=1.0;
GLfloat ballX = 0.0f;
GLfloat ballY = 0.0f;
GLfloat ballXMax,ballXMin,ballYMax,ballYMin;
GLfloat xSpeed = 0.02f;
GLfloat ySpeed = 0.007f;
int refreshMills = 30;
GLfloat angle=0.0;
int xa,ya;
int flag=0,flag1=0;
int score = 0;
void *currentfont;
GLfloat xo=0, yo=0, x, y;
GLdouble clipAreaXLeft,clipAreaXRight,clipAreaYBottom,clipAreaYTop;
void balldisp() ;
void scoredisp();
This is the reshape function. I need to know what exactly it is doing, and what it is calculating and storing. I am confused here.
void reshape(GLsizei width, GLsizei height)
{
    GLfloat aspect = (GLfloat)width / (GLfloat)height;

    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    if (width >= height)
    {
        clipAreaXLeft = -1.0 * aspect;
        clipAreaXRight = 1.0 * aspect;
        clipAreaYBottom = -1.0;
        clipAreaYTop = 1.0;
    }
    else
    {
        clipAreaXLeft = -1.0;
        clipAreaXRight = 1.0;
        clipAreaYBottom = -1.0 / aspect;
        clipAreaYTop = 1.0 / aspect;
    }
    gluOrtho2D(clipAreaXLeft, clipAreaXRight, clipAreaYBottom, clipAreaYTop + 0.10);
    ballXMin = clipAreaXLeft + ballRadius;
    ballXMax = clipAreaXRight - ballRadius;
    ballYMin = clipAreaYBottom + ballRadius;
    ballYMax = clipAreaYTop - ballRadius;
}
Below is the code that displays the ball. What is it calculating, and how are the speed and direction set? I am confused here.
void balldisp()
{
    glTranslatef(ballX, ballY, 0.0f);
    glBegin(GL_TRIANGLE_FAN);
    color();
    glVertex2f(0.0f, 0.0f);
    int numSegments = 100;
    int i;
    for (i = 0; i <= numSegments; i++)
    {
        angle = i * 2.0f * PI / numSegments;
        glVertex2f(cos(angle) * ballRadius, sin(angle) * ballRadius);
    }
    glEnd();
    ballX += xSpeed;
    ballY += ySpeed;
    if (ballX > ballXMax)
    {
        xa = ballX;
        ballX = ballXMax;
        xSpeed = -xSpeed;
    }
    else if (ballX < ballXMin)
    {
        xa = ballX;
        ballX = ballXMin;
        xSpeed = -xSpeed;
    }
    if (ballY > ballYMax)
    {
        ya = ballY;
        ballY = ballYMax;
        ySpeed = -ySpeed;
    }
    else if (ballY < ballYMin)
    {
        ya = ballY;
        ballY = ballYMin;
        ySpeed = -ySpeed;
    }
}
I want to understand the reshape function and the ball display function: what are they doing, and how are things done there?
P.S. The project is about the random motion of a ball which strikes the boundaries of the window and then moves in another direction.
The reshape function is registered with GLUT (using glutReshapeFunc) so that it gets called by GLUT whenever the size of the window changes. Note that placing OpenGL functions that set the viewport and/or the projection matrix in the reshape function is bad style and should be avoided; all OpenGL drawing related functions (which glViewport and the matrix setup are) belong in the display function.
Similarly, the display function is registered with GLUT (using glutDisplayFunc) so that it gets called by GLUT whenever the window needs to be redrawn (either because it became visible, its contents need refreshing, or a redraw has been requested with glutPostRedisplay).
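For context, here is a minimal sketch of how these callbacks are typically wired up, following the advice above (reshape only records the window size; viewport and projection are set in the display function). The names display, timer, win_w and win_h are hypothetical stand-ins, not taken from the question:
#include <GL/glut.h>

static int win_w = 640, win_h = 480;         /* last window size reported by GLUT */

void reshape(int width, int height)          /* called by GLUT on every resize */
{
    win_w = width;
    win_h = height;
}

void display(void)                           /* called by GLUT whenever a redraw is needed */
{
    glViewport(0, 0, win_w, win_h);          /* drawing-related setup lives here */
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(-1.0, 1.0, -1.0, 1.0);
    glClear(GL_COLOR_BUFFER_BIT);
    /* balldisp(); scoredisp(); ... */
    glutSwapBuffers();
}

void timer(int value)                        /* drives the animation */
{
    glutPostRedisplay();                     /* request another call to display() */
    glutTimerFunc(30, timer, 0);             /* ~30 ms per frame, like refreshMills */
}

int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
    glutInitWindowSize(win_w, win_h);
    glutCreateWindow("Bouncing ball");
    glutDisplayFunc(display);                /* register the redraw callback */
    glutReshapeFunc(reshape);                /* register the resize callback */
    glutTimerFunc(30, timer, 0);
    glutMainLoop();
    return 0;
}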
I created this function that draws a simple polygon with n vertices:
void polygon(int n)
{
    double pI = 3.141592653589;
    double area = min(width / 2, height / 2);
    int X = 0, Y = area - 1;
    double offset = Y;
    int lastx, lasty;
    double radius = sqrt(X * X + Y * Y);
    double quadrant = atan2(Y, X);
    int i;

    for (i = 1; i <= n; i++)
    {
        lastx = X;
        lasty = Y;
        quadrant = quadrant + pI * 2.0 / n;
        X = round((double)radius * cos(quadrant));
        Y = round((double)radius * sin(quadrant));
        setpen((i * 255) / n, 0, 0, 0.0, 1); // r (ramped by i), g, b, a, size
        moveto(offset + lastx, offset + lasty); // move to the previous vertex
        lineto(offset + X, offset + Y); // draw an edge to the next vertex
    }
}
How can I fill it with a solid color?
I have no idea how I can modify my code in order to draw it filled.
The common approach to fill shapes is to find where the edges of the polygon cross either each x or each y coordinate. Usually, y coordinates are used, so that the filling can be done using horizontal lines. (On framebuffer devices like VGA, horizontal lines are faster than vertical lines, because they use consecutive memory/framebuffer addresses.)
In that vein,
void fill_regular_polygon(int center_x, int center_y, int vertices, int radius)
{
const double a = 2.0 * 3.14159265358979323846 / (double)vertices;
int i = 1;
int y, px, py, nx, ny;
if (vertices < 3 || radius < 1)
return;
px = 0;
py = -radius;
nx = (int)(0.5 + radius * sin(a));
ny = (int)(0.5 - radius * cos(a));
y = -radius;
while (y <= ny || ny > py) {
const int x = px + (nx - px) * (y - py) / (ny - py);
if (center_y + y >= 0 && center_y + y < height) {
if (center_x - x >= 0)
moveto(center_x - x, center_y + y);
else
moveto(0, center_y + y);
if (center_x + x < width)
lineto(center_x + x, center_y + y);
else
lineto(width - 1, center_y + y);
}
y++;
while (y > ny) {
if (nx < 0)
return;
i++;
px = nx;
py = ny;
nx = (int)(0.5 + radius * sin(a * (double)i));
ny = (int)(0.5 - radius * cos(a * (double)i));
}
}
}
Note that I only tested the above with a simple SVG generator, and compared the drawn lines to the polygon. Seems to work correctly, but use at your own risk; no guarantees.
For general shapes, use your favourite search engine to look for "polygon filling" algorithms.
There are two different ways to implement a solution:
Scan-line
Starting at the coordinate that is at the top (smallest y value), continue to scan down line by line (incrementing y) and see which edges intersect the line.
For convex polygons you find 2 points, (x1,y) and (x2,y). Simply draw a line between those on each scan-line.
For concave polygons this can also be a multiple of 2. Simply draw lines between each pair. After one pair, go to the next 2 coordinates. This will create a filled/unfilled/filled/unfilled pattern on that scan line which resolves to the correct overall solution.
In case you have self-intersecting polygons, you would also find coordinates that are equal to some of the polygon points, and you have to filter them out. After that, you should be in one of the cases above.
If you filtered out the polygon points during scan-lining, don't forget to draw them as well.
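A minimal standalone sketch of the convex case described above, assuming the same hypothetical moveto()/lineto() drawing primitives as in the question (one horizontal run per scan line; for the concave case you would sort all crossings and draw every other pair):
#include <float.h>

/* Scan-line fill of a convex polygon given by (px[i], py[i]), i = 0..n-1. */
void fill_convex_polygon(const double *px, const double *py, int n)
{
    int i, y, ymin = (int)py[0], ymax = (int)py[0];

    for (i = 1; i < n; i++) {                    /* vertical extent of the polygon */
        if ((int)py[i] < ymin) ymin = (int)py[i];
        if ((int)py[i] > ymax) ymax = (int)py[i];
    }
    for (y = ymin; y <= ymax; y++) {             /* one horizontal run per scan line */
        double xl = DBL_MAX, xr = -DBL_MAX;
        for (i = 0; i < n; i++) {                /* intersect the scan line with every edge */
            int j = (i + 1) % n;
            double y0 = py[i], y1 = py[j];
            if ((y >= y0 && y < y1) || (y >= y1 && y < y0)) {
                double x = px[i] + (px[j] - px[i]) * ((double)y - y0) / (y1 - y0);
                if (x < xl) xl = x;              /* convex: keep the leftmost ... */
                if (x > xr) xr = x;              /* ... and rightmost crossing */
            }
        }
        if (xl <= xr) {
            moveto(xl, y);                       /* draw the filled span */
            lineto(xr, y);
        }
    }
}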
Flood-fill
The other option is to use flood-filling. It has to perform more work evaluating the border cases at every step per pixel, so this tends to turn out as a slower version. The idea is to pick a seed point within the polygon, and basically recursively extend up/down/left/right pixel by pixel until you hit a border.
The algorithm has to read and write the entire surface of the polygon, and does not cross self-intersection points. There can be considerable stack build-up (for naive implementations at least) for large surfaces, and the border condition is pixel-based, which gives you less flexibility (e.g. the fill can leak out through gaps when other things are drawn on top of the polygon). In this sense it is not a mathematically exact solution, but it works well for many applications.
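A minimal recursive sketch of the idea, assuming hypothetical getpixel()/putpixel() framebuffer helpers and the width/height globals from the question; a production version would use an explicit stack or span filling to avoid the stack build-up mentioned above:
/* Naive 4-way recursive flood fill; getpixel()/putpixel() are hypothetical helpers. */
void flood_fill(int x, int y, unsigned int border_color, unsigned int fill_color)
{
    unsigned int c;

    if (x < 0 || y < 0 || x >= width || y >= height)
        return;                                        /* outside the image */
    c = getpixel(x, y);
    if (c == border_color || c == fill_color)
        return;                                        /* hit the outline or already filled */
    putpixel(x, y, fill_color);
    flood_fill(x + 1, y, border_color, fill_color);    /* extend right */
    flood_fill(x - 1, y, border_color, fill_color);    /* extend left  */
    flood_fill(x, y + 1, border_color, fill_color);    /* extend down  */
    flood_fill(x, y - 1, border_color, fill_color);    /* extend up    */
}
Seed it with any point strictly inside the polygon after drawing the outline.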
The most efficient solution is to decompose the regular polygon into trapezoids (and one or two triangles).
By symmetry, the vertices are vertically aligned and it is an easy matter to find the limiting abscissas (X + R cos(2πn/N) and X + R cos(2π(n+1)/N)).
You also have the ordinates (Y + R sin(2πn/N) and Y + R sin(2π(n+1)/N)), and it suffices to interpolate linearly between two vertices by Y = Y0 + (Y1 - Y0) (X - X0) / (X1 - X0).
Filling in horizontal runs is a little more complex, as the vertices may not be aligned horizontally and there are more trapezoids.
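A minimal sketch of that decomposition for the regular polygon, using the interpolation above and the question's hypothetical moveto()/lineto() primitives; it assumes one vertex at angle 0 so the shape is symmetric about the horizontal axis through the center:
#include <math.h>

/* Fill a regular n-gon of radius R centered at (cx, cy), one vertex at angle 0,
   by vertical runs: one trapezoid per pair of adjacent vertices, with the
   half-height interpolated linearly between them. */
void fill_regular_polygon_trapezoids(double cx, double cy, int n, double R)
{
    const double a = 2.0 * 3.141592653589 / (double)n;
    int k;

    for (k = 0; k < n / 2; k++) {
        double x0 = cx + R * cos(a * k),       y0 = R * sin(a * k);
        double x1 = cx + R * cos(a * (k + 1)), y1 = R * sin(a * (k + 1));
        double x;

        for (x = ceil(x1); x <= x0; x += 1.0) {                /* x1 < x0 on the upper chain */
            double h = y0 + (y1 - y0) * (x - x0) / (x1 - x0);  /* interpolated half-height */
            moveto(x, cy - h);                                 /* vertical run between the */
            lineto(x, cy + h);                                 /* two mirrored edges */
        }
    }
}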
Anyway, it seems that I solved this myself again, without relying on assistance (or any attempt at it):
void polygon(int n)
{
    double pI = 3.141592653589;
    double area = min(width / 2, height / 2);
    int X = 0, Y = area - 1;
    double offset = Y;
    int lastx, lasty;

    while (Y-- > 0) {
        double radius = sqrt(X * X + Y * Y);
        double quadrant = atan2(Y, X);
        int i;
        for (i = 1; i <= n; i++)
        {
            lastx = X;
            lasty = Y;
            quadrant = quadrant + pI * 2.0 / n;
            X = round((double)radius * cos(quadrant));
            Y = round((double)radius * sin(quadrant));
            //setpen((i * 255) / n, 0, 0, 0.0, 1);
            setpen(255, 0, 0, 0.0, 1); // just red
            moveto(offset + lastx, offset + lasty);
            lineto(offset + X, offset + Y);
        }
    }
}
As you can see, it isn't very complex, which means it might not be the most efficient solution either... but it is close enough.
It decrements the radius and fills the polygon by drawing ever smaller versions of it.
Because of that, precision plays an important role: the higher n is, the less accurately the polygon will be filled.