Grainy looking sphere in my ray tracer - c

I am trying to write a simple ray tracer. The final image should look like this. I have read up on the technique, and below is what I am doing:
create an empty image (to fill each pixel, via ray tracing)
for each pixel [for each row, each column]
create the equation of the ray emanating from our pixel
trace() ray:
if ray intersects SPHERE
compute local shading (including shadow determination)
return color;
The scene data is shown below: it places a gray sphere of radius 1 at (0,0,-3) and a white light source at the origin.
2
amb: 0.3 0.3 0.3
sphere
pos: 0.0 0.0 -3.0
rad: 1
dif: 0.3 0.3 0.3
spe: 0.5 0.5 0.5
shi: 1
light
pos: 0 0 0
col: 1 1 1
Mine looks very weird. Here is my sphere-intersection code:
//check ray intersection with the sphere
boolean intersectsWithSphere(struct point rayPosition, struct point rayDirection, Sphere sp,float* t){
//float a = (rayDirection.x * rayDirection.x) + (rayDirection.y * rayDirection.y) +(rayDirection.z * rayDirection.z);
// value for a is 1 since rayDirection vector is normalized
double radius = sp.radius;
double xc = sp.position[0];
double yc =sp.position[1];
double zc =sp.position[2];
double xo = rayPosition.x;
double yo = rayPosition.y;
double zo = rayPosition.z;
double xd = rayDirection.x;
double yd = rayDirection.y;
double zd = rayDirection.z;
double b = 2 * ((xd*(xo-xc))+(yd*(yo-yc))+(zd*(zo-zc)));
double c = (xo-xc)*(xo-xc) + (yo-yc)*(yo-yc) + (zo-zc)*(zo-zc) - (radius * radius);
float D = b*b + (-4.0f)*c;
//ray does not intersect the sphere
if(D < 0 ){
return false;
}
D = sqrt(D);
float t0 = (-b - D)/2 ;
float t1 = (-b + D)/2;
//printf("D=%f",D);
//printf(" t0=%f",t0);
//printf(" t1=%f\n",t1);
if((t0 > 0) && (t1 > 0)){
*t = min(t0,t1);
return true;
}
else {
*t = 0;
return false;
}
}
Below is the trace() function:
unsigned char* trace(struct point rayPosition, struct point rayDirection, Sphere * totalspheres) {
struct point tempRayPosition = rayPosition;
struct point tempRayDirection = rayDirection;
float f=0;
float tnear = INFINITY;
boolean sphereIntersectionFound = false;
int sphereIndex = -1;
for(int i=0; i < num_spheres ; i++){
float t = INFINITY;
if(intersectsWithSphere(tempRayPosition,tempRayDirection,totalspheres[i],&t)){
if(t < tnear){
tnear = t;
sphereIntersectionFound = true;
sphereIndex = i;
}
}
}
if(sphereIndex < 0){
//printf("No interesection found\n");
mycolor[0] = 1;
mycolor[1] = 1;
mycolor[2] = 1;
return mycolor;
}
else {
Sphere sp = totalspheres[sphereIndex];
//intersection point
hitPoint[0].x = tempRayPosition.x + tempRayDirection.x * tnear;
hitPoint[0].y = tempRayPosition.y + tempRayDirection.y * tnear;
hitPoint[0].z = tempRayPosition.z + tempRayDirection.z * tnear;
//normal at the intersection point
normalAtHitPoint[0].x = (hitPoint[0].x - totalspheres[sphereIndex].position[0])/ totalspheres[sphereIndex].radius;
normalAtHitPoint[0].y = (hitPoint[0].y - totalspheres[sphereIndex].position[1])/ totalspheres[sphereIndex].radius;
normalAtHitPoint[0].z = (hitPoint[0].z - totalspheres[sphereIndex].position[2])/ totalspheres[sphereIndex].radius;
normalizedNormalAtHitPoint[0] = normalize(normalAtHitPoint[0]);
for(int j=0; j < num_lights ; j++) {
for(int k=0; k < num_spheres ; k++){
shadowRay[0].x = lights[j].position[0] - hitPoint[0].x;
shadowRay[0].y = lights[j].position[1] - hitPoint[0].y;
shadowRay[0].z = lights[j].position[2] - hitPoint[0].z;
normalizedShadowRay[0] = normalize(shadowRay[0]);
//R = 2 * ( N dot L) * N - L
reflectionRay[0].x = - 2 * dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]) * normalizedNormalAtHitPoint[0].x +normalizedShadowRay[0].x;
reflectionRay[0].y = - 2 * dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]) * normalizedNormalAtHitPoint[0].y +normalizedShadowRay[0].y;
reflectionRay[0].z = - 2 * dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]) * normalizedNormalAtHitPoint[0].z +normalizedShadowRay[0].z;
normalizeReflectionRay[0] = normalize(reflectionRay[0]);
struct point temp;
temp.x = hitPoint[0].x + (shadowRay[0].x * 0.0001 );
temp.y = hitPoint[0].y + (shadowRay[0].y * 0.0001);
temp.z = hitPoint[0].z + (shadowRay[0].z * 0.0001);
struct point ntemp = normalize(temp);
float f=0;
struct point tempHitPoint;
tempHitPoint.x = hitPoint[0].x + 0.001;
tempHitPoint.y = hitPoint[0].y + 0.001;
tempHitPoint.z = hitPoint[0].z + 0.001;
if(intersectsWithSphere(hitPoint[0],ntemp,totalspheres[k],&f)){
// if(intersectsWithSphere(tempHitPoint,ntemp,totalspheres[k],&f)){
printf("In shadow\n");
float r = lights[j].color[0];
float g = lights[j].color[1];
float b = lights[j].color[2];
mycolor[0] = ambient_light[0] + r;
mycolor[1] = ambient_light[1] + g;
mycolor[2] = ambient_light[2] + b;
return mycolor;
} else {
// point is not in shadow; use Phong shading to determine the color of the point.
//I = lightColor * (kd * (L dot N) + ks * (R dot V) ^ sh)
//(for each color channel separately; note that if L dot N < 0, you should clamp L dot N to zero; same for R dot V)
float x = dot(normalizedShadowRay[0],normalizedNormalAtHitPoint[0]);
if(x < 0)
x = 0;
V[0].x = - rayDirection.x;
V[0].x = - rayDirection.y;
V[0].x = - rayDirection.z;
normalizedV[0] = normalize(V[0]);
float y = dot(normalizeReflectionRay[0],normalizedV[0]);
if(y < 0)
y = 0;
float ar = totalspheres[sphereIndex].color_diffuse[0] * x;
float br = totalspheres[sphereIndex].color_specular[0] * pow(y,totalspheres[sphereIndex].shininess);
float r = lights[j].color[0] * (ar+br);
//----------------------------------------------------------------------------------
float bg = totalspheres[sphereIndex].color_specular[1] * pow(y,totalspheres[sphereIndex].shininess);
float ag = totalspheres[sphereIndex].color_diffuse[1] * x;
float g = lights[j].color[1] * (ag+bg);
//----------------------------------------------------------------------------------
float bb = totalspheres[sphereIndex].color_specular[2] * pow(y,totalspheres[sphereIndex].shininess);
float ab = totalspheres[sphereIndex].color_diffuse[2] * x;
float b = lights[j].color[2] * (ab+bb);
mycolor[0] = r + ambient_light[0];
mycolor[1] = g + ambient_light[1];
mycolor[2] = b+ ambient_light[2];
return mycolor;
}
}
}
}
}
The code calling trace() looks like :
void draw_scene()
{
//Aspect Ratio
double a = WIDTH / HEIGHT;
double angel = tan(M_PI * 0.5 * fov/ 180);
ray[0].x = 0.0;
ray[0].y = 0.0;
ray[0].z = 0.0;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned int x,y;
float sx, sy;
for(x=0;x < WIDTH;x++)
{
glPointSize(2.0);
glBegin(GL_POINTS);
for(y=0;y < HEIGHT;y++)
{
sx = (((x + 0.5) / WIDTH) * 2.0 ) - 1;
sy = (((y + 0.5) / HEIGHT) * 2.0 ) - 1;
sx = sx * angel * a;
sy = sy * angel;
//set ray direction
ray[1].x = sx;
ray[1].y = sy;
ray[1].z = -1;
normalizedRayDirection[0] = normalize(ray[1]);
unsigned char* color = trace(ray[0],normalizedRayDirection[0],spheres);
unsigned char x1 = color[0] * 255;
unsigned char y1 = color[1] * 255;
unsigned char z1 = color[2] * 255;
plot_pixel(x,y,x1 %256,y1%256,z1%256);
}
glEnd();
glFlush();
}
}
There could be many, many problems with the code/understanding.

I haven't taken the time to understand all your code, and I'm definitely not a graphics expert, but I believe the problem you have is called "surface acne". In this case it's probably happening because your shadow rays are intersecting with the object itself. What I did in my code to fix this is add epsilon * hitPoint.normal to the shadow ray origin. This effectively moves the ray away from your object a bit, so they don't intersect.
The value I'm using for the offset is the square root of 1.19209290 * 10^-7; that number is a constant called EPSILON (the machine epsilon, FLT_EPSILON in C) in the particular language I'm using.
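In the question's code that offset could look roughly like the sketch below. This is a minimal sketch, not the OP's exact method: it reuses the hitPoint, normalizedNormalAtHitPoint and normalizedShadowRay arrays from the question, and the SHADOW_EPSILON value is an assumption.
#define SHADOW_EPSILON 1e-4f /* assumed value; any small positive offset works */
struct point shadowOrigin;
/* push the shadow-ray origin slightly off the surface along the normal */
shadowOrigin.x = hitPoint[0].x + SHADOW_EPSILON * normalizedNormalAtHitPoint[0].x;
shadowOrigin.y = hitPoint[0].y + SHADOW_EPSILON * normalizedNormalAtHitPoint[0].y;
shadowOrigin.z = hitPoint[0].z + SHADOW_EPSILON * normalizedNormalAtHitPoint[0].z;
/* cast the shadow ray from the offset origin along the normalized direction
   toward the light, rather than along a normalized position */
if(intersectsWithSphere(shadowOrigin, normalizedShadowRay[0], totalspheres[k], &f)){
    /* in shadow */
}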

What possible reason do you have for doing this (in the non-shadow branch of trace (...)):
V[0].x = - rayDirection.x;
V[0].x = - rayDirection.y;
V[0].x = - rayDirection.z;
You might as well comment out the first two computations since you write the results of each to the same component. I think you probably meant to do this instead:
V[0].x = - rayDirection.x;
V[0].y = - rayDirection.y;
V[0].z = - rayDirection.z;
That said, you should also avoid using GL_POINT primitives to cover a 2x2 pixel quad. Point primitives are not guaranteed to be square, and OpenGL implementations are not required to support any size other than 1.0. In practice, most support 1.0 - ~64.0 but glDrawPixels (...) is a much better way of writing 2x2 pixels, since it skips primitive assembly and the above mentioned limitations. You are using immediate mode in this example anyway, so glRasterPos (...) and glDrawPixels (...) are still a valid approach.
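A rough sketch of that approach, assuming the traced colors are accumulated into a WIDTH x HEIGHT byte buffer (the framebuffer array and the r8/g8/b8 byte values are hypothetical names, and it assumes a projection where (0,0) maps to the lower-left corner of the window):
static unsigned char framebuffer[HEIGHT][WIDTH][3]; /* RGB bytes per pixel */
/* inside the per-pixel ray-tracing loop: */
framebuffer[y][x][0] = r8;
framebuffer[y][x][1] = g8;
framebuffer[y][x][2] = b8;
/* once per frame, after the loop: */
glRasterPos2i(0, 0);
glDrawPixels(WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, framebuffer);
glFlush();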

It seems you are implementing the standard quadratic ray-sphere intersection formula, but you deviate at the end from the direction the article takes.
First, the article warns that D and b can be very close in value, so computing -b + D loses most of its significant digits (catastrophic cancellation). It suggests an alternative formulation.
Also, you are testing that both t0 and t1 are > 0. That doesn't have to be true for the ray to hit the sphere: you could be inside it (though you obviously should not be in your test scene).
Finally, I would add a test at the beginning to confirm that the direction vector is normalized. I've messed that up more than once in my renderers.
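For reference, here is a sketch of the more stable root computation the article suggests, written against the question's b and c (a == 1 because the direction is normalized). This is not the OP's code, just one way to avoid the cancellation; copysign comes from <math.h> (C99):
double disc = b*b - 4.0*c;
if(disc < 0.0)
    return false; /* ray misses the sphere */
double q = -0.5 * (b + copysign(sqrt(disc), b));
double t0 = q;      /* first root */
double t1 = c / q;  /* second root, without subtracting nearly equal numbers */
if(t0 > t1){ double tmp = t0; t0 = t1; t1 = tmp; }
if(t1 < 0.0)
    return false;   /* sphere entirely behind the ray */
*t = (t0 > 0.0) ? t0 : t1; /* t0 <= 0 means the ray origin is inside the sphere */
return true;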

Related

How to use a lookAt matrix to compute ray in raytracing?

As I understand it, the 'lookat' method is one of the simplest ways to place/rotate the camera in a scene. So I implemented the matrix described at (https://www.scratchapixel.com/lessons/mathematics-physics-for-computer-graphics/lookat-function) in my ray tracer, but I have no idea how to use it to compute rays.
Basically what I do is place the camera at negative Z, send a ray toward positive Z, and select the pixel by iterating over the X and Y of my view plane.
This is easy because the view plane is in front of the camera, so I simply assign the X and Y of my iterations to the ray destination's X and Y.
However, I would like to be able to send rays into any part of space.
Could you please help me understand how to do that?
Thank you!
What I do basically:
{
double deg = 50.;
double rad = deg / (180.0 / M_PI);
double distance = (WIDTH / 2) * (cotan(rad / 2));
ray.orig.x = HEIGH / 2.0;
ray.orig.y = WIDTH / 2.0;
ray.orig.z = -distance;
y = -1;
while (++y <= HEIGH)
{
x = -1;
while (++x <= WIDTH)
{
ray.dest.x = x - ray.orig.x;
ray.dest.y = y - ray.orig.y;
ray.dest.z = 0. - ray.orig.z;
ray.dest = ve_normalize(&ray.dest);
check_objects(c, &ray, 0);
add_diffuse_light(c);
put_pixel(c, &x, &y);
}
}
}
The functions to handle the lookat matrix:
t_lookat lookati(t_vector *from, t_vector *to)
{
t_lookat lookat;
t_vector fo;
t_vector ri;
t_vector up;
t_vector tmp;
tmp.x = 0; tmp.y = 1; tmp.z = 0;
fo = ve_subtraction(from, to);
fo = ve_normalize(&fo);
ri = ve_cross(&tmp, &fo);
ri = ve_normalize(&ri);
up = ve_cross(&fo, &ri);
up = ve_normalize(&up);
lookat.ri.x = ri.x;
lookat.ri.y = ri.y;
lookat.ri.z = ri.z;
lookat.up.x = up.x;
lookat.up.y = up.y;
lookat.up.z = up.z;
lookat.fo.x = fo.x;
lookat.fo.y = fo.y;
lookat.fo.z = fo.z;
lookat.fr.x = from->x;
lookat.fr.y = from->y;
lookat.fr.z = from->z;
return(lookat);
}
t_vector orientate(t_vector *a, t_vector *from, t_vector *to)
{
t_lookat k;
k = lookati(from, to);
t_vector orientate;
orientate.x = a->x * k.ri.x + a->y * k.up.x + a->z * k.fo.x + a->x * k.fr.x;
orientate.y = a->x * k.ri.y + a->y * k.up.y + a->z * k.fo.y + a->x * k.fr.y;
orientate.z = a->x * k.ri.z + a->y * k.up.z + a->z * k.fo.z + a->x * k.fr.z;
return(orientate);
}
Thank you guys; I finally solved the problem by reading this guide (https://steveharveynz.wordpress.com/2012/12/20/ray-tracer-part-two-creating-the-camera), which suggests normalizing the pixel coordinates (like the pixel range suggested by user "Spektre") without using a matrix.
Ps.
typedef struct s_vector
{
double x;
double y;
double z;
} t_vector;
typedef struct s_lookat
{
t_vector ri; //right vector
t_vector up; // up
t_vector fo; // foorward
t_vector fr; // eye position
} t_lookat;
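For completeness, here is a minimal sketch of how the right/up/forward basis returned by lookati() could be used to orient a camera-space ray direction. It reuses the t_vector/t_lookat types and ve_normalize() shown above and assumes <math.h>; the t_ray struct (orig/dest fields like the question's ray), the WIDTH/HEIGH constants and the fov-to-scale mapping are assumptions for illustration:
t_ray camera_ray(int x, int y, t_vector *from, t_vector *to, double fov_deg)
{
    t_lookat k = lookati(from, to);
    double aspect = (double)WIDTH / (double)HEIGH;
    double scale = tan(fov_deg * 0.5 * M_PI / 180.0);
    /* camera-space direction for this pixel: x goes right, y goes up */
    double cx = (2.0 * ((x + 0.5) / WIDTH) - 1.0) * aspect * scale;
    double cy = (1.0 - 2.0 * ((y + 0.5) / HEIGH)) * scale;
    t_ray ray;
    ray.orig = *from; /* primary rays start at the eye, so no translation is applied */
    ray.dest.x = cx * k.ri.x + cy * k.up.x - k.fo.x; /* rotate by (ri, up, -fo) only */
    ray.dest.y = cx * k.ri.y + cy * k.up.y - k.fo.y;
    ray.dest.z = cx * k.ri.z + cy * k.up.z - k.fo.z;
    ray.dest = ve_normalize(&ray.dest);
    return (ray);
}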

Algorithm incorrectly says ray intersects triangle above it

This is one of many similar ray-triangle intersection algorithms. Every other algorithm I've tested also returns true for these numbers, while the ray clearly does not cross the triangle. The ray goes from y=0 to y=1, while the triangle is flat across y = 2.3.
This is not a winding issue, as it should never return true (winding issues would explain false negatives, not false positives).
All code necessary to reproduce in C or C++ is included here.
What am I missing?
#define vector(a,b,c) \
(a)[0] = (b)[0] - (c)[0]; \
(a)[1] = (b)[1] - (c)[1]; \
(a)[2] = (b)[2] - (c)[2];
#define crossProduct(a,b,c) \
(a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \
(a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \
(a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1];
#define innerProduct(v,q) \
((v)[0] * (q)[0] + \
(v)[1] * (q)[1] + \
(v)[2] * (q)[2])
#define DOT(A,B) \
((A)[0] * (B)[0] + (A)[1] * (B)[1] + (A)[2] * (B)[2])
int intersect3D_RayTriangle( )
{
// dir, w0, w; // ray vectors
double r, a, b; // params to calc ray-plane intersect
// output: Point* I
//Ray R
double origin[3] = {0,0,0};//{orig[0],orig[1],orig[2]};
double direction[3] = {0,1,0};//{dir[0],dir[1],dir[2]};
//Triangle T
double corner1[3] = {3, 2.3, -4 };//{v0[0],v0[1],v0[2]};
double corner2[3] = {-7, 2.3, 2};//{v1[0],v1[1],v1[2]};
double corner3[3] = {3, 2.3, 2};// v2[0],v2[1],v2[2]};
// Vector u, v, n; // triangle vectors
double u[3] = {corner2[0]-corner1[0],corner2[1]-corner1[1],corner2[2]-corner1[2]};
double v[3] = {corner3[0]-corner1[0],corner3[1]-corner1[1],corner3[2]-corner1[2]};
double n[3] = {0,0,0};
double e1[3],e2[3],h[3],q[3];
double f;
// get triangle edge vectors and plane normal
crossProduct(n, u, v);
if ((n[0] == 0) && (n[1] == 0) && (n[2] == 0)) // triangle is wonky
return -1; // do not deal with this case
// dir = R.P1 - R.P0; // ray direction vector
double rayDirection[3] = {direction[0] - origin[0], direction[1] - origin[1], direction[2] - origin[2]};
//w0 = R.P0 - T.V0;
double w0[3] = {origin[0] - corner1[0], origin[1] - corner1[1], origin[2] - corner1[2]};
a = -DOT(n,w0);
b = DOT(n,rayDirection);
if (fabs(b) < __DBL_EPSILON__) { // ray is parallel to triangle plane
if (a == 0) // ray lies in triangle plane
return 2;
else return 0; // ray disjoint from plane
}
// get intersect point of ray with triangle plane
r = a / b;
if (r < 0.0) // ray goes away from triangle
return 0; // => no intersect
// for a segment, also test if (r > 1.0) => no intersect
//*I = R.P0 + r * dir; // intersect point of ray and plane
double I[3] = {0,0,0};
I[0] = origin[0] + rayDirection[0] * r;
I[1] = origin[1] + rayDirection[1] * r;
I[2] = origin[2] + rayDirection[2] * r;
// is I inside T?
double uu, uv, vv, wu, wv, D;
uu = DOT(u,u);
uv = DOT(u,v);
vv = DOT(v,v);
double w[3] = {0,0,0};
w[0] = I[0] - corner1[0];
w[1] = I[1] - corner1[1];
w[2] = I[2] - corner1[2];
wu = DOT(w,u);
wv = DOT(w,v);
D = uv * uv - uu * vv;
// get and test parametric coords
double s, t;
s = (uv * wv - vv * wu) / D;
if (s < 0.0 || s > 1.0) // I is outside T
return 0;
t = (uv * wu - uu * wv) / D;
if (t < 0.0 || (s + t) > 1.0) // I is outside T
return 0;
return 1; // I is in T
}
The code works fine for rays.
The OP expected the "ray" code to behave like "segment" code.
You could use the r value to test for segment exclusion:
if (r > 1.0) return 0;
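In context, the extra test sits right after the existing r < 0.0 check (sketch using the code's own variables):
r = a / b;
if (r < 0.0)  // intersection is behind the segment start
    return 0;
if (r > 1.0)  // beyond the 'direction' endpoint: reject when treating the ray as a segment
    return 0;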

Ray Tracing calculation in C

I'm new to ray tracing and trying to program one in C. But my program keeps showing a dot (around 1-3 pixels) of the sphere in the wrong place, and now I'm confused. This feels like a very stupid question, but I'm confused about exactly how big a sphere radius of 1 actually is. What I mean is: if the radius is 1, is the circle 2 pixels across?
I know all the calculations and I triple-checked for errors in my code, but just in case, here is part of it:
Directions:
//size: 1024x768, view point (512 384 1), screen (0 0 0) to (1024 768 0)
ray[0] = x - start_x;
ray[1] = y - start_y;
ray[2] = 0 - start_z;
//normalize
double length;
length = (sqrt((ray[0]*ray[0]) + (ray[1]*ray[1]) + (ray[2]*ray[2])));
ray[0] = ray[0]/length;
ray[1] = ray[1]/length;
ray[2] = ray[2]/length;
Intersection:
temp = top; //my struct with sphere data, _x, _y, _z, _r, _red, _green, _blue
//x and y is the current pixel value
while (temp != NULL) {
x_diff = start_x - temp->_x + 0.0;
y_diff = start_y - temp->_y + 0.0;
z_diff = start_z - temp->_z + 0.0;
//a = 1 because my direction is a normalized
b = 2.0 * ((rayVector[0] * x_diff) + (rayVector[1] * y_diff) + (rayVector[2] * z_diff));
c = (x_diff * x_diff * 1.0) + (y_diff * y_diff) + (z_diff * z_diff) - (temp->_r * temp->_r);
check = (b * b) - (4.0 * c);
if (check < 0) { //0
pixels[width][height][0] = 0.0;
pixels[width][height][1] = 0.0;
pixels[width][height][2] = 0.0;
}
else if (check == 0) { //1
r1 = (b * -1.0) /2.0;
if (r1 < nearest_z) {
nearest_z = r1;
pixels[width][height][0] = temp->_red;
pixels[width][height][1] = temp->_green;
pixels[width][height][2] = temp->_blue;
}
}
else { //2
r1 = ((b * -1.0) + sqrt(check))/2.0;
r2 = ((b * -1.0) - sqrt(check))/2.0;
if ((r1 < r2) && (r1 < nearest_z)) {
nearest_z = r1;
pixels[width][height][0] = 255.0;
pixels[width][height][1] = 0;
pixels[width][height][2] = 0;
}
else if ((r2 < r1) && (r2 < nearest_z)) {
nearest_z = r2;
pixels[width][height][0] = temp->_red;
pixels[width][height][1] = temp->_green;
pixels[width][height][2] = temp->_blue;
}
}
temp = temp->next;
}
I haven't done any lighting yet since the flat colouring doesn't work. I'm new to OpenGL, so expect me to be missing some common functions in the code. Thanks in advance.
Edit:
I only have one sphere currently, but my output looks like: img1
I was expecting a bigger circle. Also, I added a printf for each intersection (when there is one), and when I plot them manually on paper they form a 4x5 pixel square, but there are only 4 dots in the output.
Edit 2: I changed the sphere to x = 512, y = 384, z = -21, r = 30, and it gave me this:
img2
Again, I only have one sphere, yet there are 4 in the image. Also, there are holes between the lines?
If I change the z value to -20, my output becomes all white (the colour of the sphere).
I use glDrawPixels(1024,768,GL_RGB,GL_FLOAT,pixels); to draw
I also wrote an RGB output file, and everything seems to be in the right place there, but when I draw in the program, it is off.

How to implement adaptive subdivision algorithm for curve in C

My homework is to write a C program with OpenGL/GLUT which, after getting groups of 4 points by mouse click (points with 3 coordinates), should draw a Bezier curve using an adaptive subdivision algorithm. At a theoretical level it's clear how the algorithm works, but I don't know how to put it into C code. In class we saw that the 4 control points can form a shape similar to a "trapeze"; the algorithm then computes the two "heights" and checks whether they satisfy a tolerance. The problem is that the user might click anywhere on the screen, so the points might not have a trapeze-like shape. So, where should I start? This is all I have.
This is the code I have written, which is called each time a control point is added:
if (bezierMode == CASTELJAU_ADAPTIVE) {
glColor3f (0.0f, 0.8f, 0.4f); /* draw adaptive casteljau curve in green */
for(i=0; i+3<numCV; i += 3)
adaptiveDeCasteljau3(CV, i, 0.01);
}
void adaptiveDeCasteljau3(float CV[MAX_CV][3], int position, float tolerance) {
float x01 = (CV[position][0] + CV[position+1][0]) / 2;
float y01 = (CV[position][1] + CV[position+1][1]) / 2;
float x12 = (CV[position+1][0] + CV[position+2][0]) / 2;
float y12 = (CV[position+1][1] + CV[position+2][1]) / 2;
float x23 = (CV[position+2][0] + CV[position+3][0]) / 2;
float y23 = (CV[position+2][1] + CV[position+3][1]) / 2;
float x012 = (x01 + x12) / 2;
float y012 = (y01 + y12) / 2;
float x123 = (x12 + x23) / 2;
float y123 = (y12 + y23) / 2;
float x0123 = (x012 + x123) / 2;
float y0123 = (y012 + y123) / 2;
float dx = CV[3][0] - CV[0][0];
float dy = CV[3][1] - CV[0][1];
float d2 = fabs(((CV[1][0] - CV[3][0]) * dy - (CV[1][1] - CV[3][1]) * dx));
float d3 = fabs(((CV[2][0] - CV[3][0]) * dy - (CV[2][1] - CV[3][1]) * dx));
if((d2 + d3)*(d2 + d3) < tolerance * (dx*dx + dy*dy)) {
glBegin(GL_LINE_STRIP);
glVertex2f(x0123, y0123);
glEnd();
return;
}
float tmpLEFT[4][3];
float tmpRIGHT[4][3];
tmpLEFT[0][0] = CV[0][0];
tmpLEFT[0][1] = CV[0][1];
tmpLEFT[1][0] = x01;
tmpLEFT[1][1] = y01;
tmpLEFT[2][0] = x012;
tmpLEFT[2][1] = y012;
tmpLEFT[3][0] = x0123;
tmpLEFT[3][1] = y0123;
tmpRIGHT[0][0] = x0123;
tmpRIGHT[0][1] = y0123;
tmpRIGHT[1][0] = x123;
tmpRIGHT[1][1] = y123;
tmpRIGHT[2][0] = x23;
tmpRIGHT[2][1] = y23;
tmpRIGHT[3][0] = CV[3][0];
tmpRIGHT[3][1] = CV[3][1];
adaptiveDeCasteljau3(tmpLEFT, 0, tolerance);
adaptiveDeCasteljau3(tmpRIGHT, 0, tolerance);
}
and obviously nothing is drawn. Do you have any idea?
The glBegin/glEnd pair should wrap your whole loop, not sit inside the recursion around each isolated vertex!
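Concretely, something along these lines, sketched against the question's code: one glBegin/glEnd wraps each curve, and the recursion only emits vertices (the flat case in adaptiveDeCasteljau3 becomes a single glVertex2f call):
if (bezierMode == CASTELJAU_ADAPTIVE) {
    glColor3f (0.0f, 0.8f, 0.4f);
    for(i=0; i+3<numCV; i += 3) {
        glBegin(GL_LINE_STRIP);
        glVertex2f(CV[i][0], CV[i][1]);       /* first control point of this span */
        adaptiveDeCasteljau3(CV, i, 0.01);    /* now only calls glVertex2f internally */
        glVertex2f(CV[i+3][0], CV[i+3][1]);   /* last control point of this span */
        glEnd();
    }
}
/* and inside adaptiveDeCasteljau3, the flat case becomes just: */
glVertex2f(x0123, y0123);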

Realtime Band-Limited Impulse Train Synthesis using SDL mixer

I'm trying to implement an audio synthesizer using this technique:
https://ccrma.stanford.edu/~stilti/papers/blit.pdf
I'm doing it in standard C, using SDL2_Mixer library.
This is my BLIT function implementation:
double blit(double angle, double M, double P) {
double x = M * angle / P;
double denom = (M * sin(M_PI * angle / P));
if (denom < 1)
return (M / P) * cos(M_PI * x) / cos(M_PI * x / M);
else {
double numerator = sin(M_PI * x);
return (M / P) * numerator / denom;
}
}
The idea is to combine it to generate a square wave, following the paper's instructions. I set up SDL2_mixer with this configuration:
SDL_AudioSpec *desired, *obtained;
SDL_AudioSpec *hardware_spec;
desired = (SDL_AudioSpec*)malloc(sizeof(SDL_AudioSpec));
obtained = (SDL_AudioSpec*)malloc(sizeof(SDL_AudioSpec));
desired->freq=44100;
desired->format=AUDIO_U8;
desired->channels=1;
desired->samples=2048;
desired->callback=create_rect;
desired->userdata=NULL;
And here's my create_rect function. It creates a bipolar impulse train, then integrates its value to generate a band-limited rect function.
void create_rect(void *userdata, Uint8 *stream, int len) {
static double angle = 0;
static double integral = 0;
int i = 0;
// This is the freq of my tone
double f1 = tone_table[current_wave.note];
// Sample rate
double fs = 44100;
// Pulse
double P = fs / f1;
int M = 2 * floor(P / 2) + 1;
double oldbipolar = 0;
double bipolar = 0;
for(i = 0; i < len; i++) {
if (++angle > P)
angle -= P;
double angle2 = angle + floor(P/2);
if (angle2 > P)
angle2 -= P;
bipolar = blit(angle2, M, P) - blit(angle, M, P);
integral += (bipolar + oldbipolar) * 0.5;
oldbipolar = bipolar;
*stream++ = (integral + 0.5) * 127;
}
}
My problem is: the resulting wave is quite OK, but after a few seconds it starts to produce noise. I tried to plot the result, and here it is:
Any idea?
EDIT: Here's a plot of the bipolar BLIT before integrating it:
