Parallelizing the Mandelbrot set using MPI - C

I am trying to parallelize a Mandelbrot area computation with MPI.
The correct output should be around 1.510659; however, my program does not produce that value.
** PROGRAM: Mandelbrot area
**
** PURPOSE: Program to compute the area of a Mandelbrot set.
** The correct answer should be around 1.510659.
**
** USAGE: Program runs without input ... just run the executable
**
A reduction is needed for numoutside.
This is my parallelized code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
#define NPOINTS 1000
#define MAXITER 1000
int P = 1;
struct d_complex
{
double r;
double i;
};
int testpoint(struct d_complex);
struct d_complex c;
struct d_complex cPart;
int numoutside = 1;
int main()
{
int i, j, row;
int res;
double area, error, eps = 1.0e-5;
int myrank, mysize;
double stsec, ensec, commtime, maxcommtime;
MPI_Status status;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &mysize);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
stsec = MPI_Wtime();
// Loop over grid of points in the complex plane which contains the Mandelbrot set,
// testing each point to see whether it is inside or outside the set.
/*for (i = 0; i < NPOINTS; i ++)
{
for (j = 0; j < NPOINTS ; j++)
{
c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps;
c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps;
testpoint(c);
}
}*/
if (myrank == 0)
{
/* Begin User Program - the master */
//*
int outsum, nb_pixel = NPOINTS*NPOINTS ;
for (i = 0; i < nb_pixel; i++)
{
MPI_Recv(&res, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
// printf("Slave id %d has send : %d \n", status.MPI_SOURCE, data[2]);
// printf("%d: [%d,%d] -> [%d,%d] = %d\n", status.MPI_SOURCE, data[0], data[1], data[0] + MAXX, data[1] + MAXY, data[2]);
res += numoutside;
}
area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - res) / (double)(NPOINTS * NPOINTS);
error = area / (double)NPOINTS;
printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n", area, error);
printf("Finish.\n");
}
else
{
for (i = myrank; i < NPOINTS; i+=mysize)
{
for (j = 0; j < NPOINTS; j++)
{
c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps;
c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps;
res=testpoint(c);
MPI_Send(&res, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
}
}
// Calculate area of set and error estimate and output the results
MPI_Finalize();
ensec = MPI_Wtime();
commtime = ensec - stsec;
// area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - numoutside) / (double)(NPOINTS * NPOINTS);
// error = area / (double)NPOINTS;
printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n", area, error);
if (myrank == 0)
{
printf("%.3f\n", commtime);
}
}
int testpoint(struct d_complex c)
{
// Does the iteration z=z*z+c, until |z| > 2 when point is known to be outside set
// If loop count reaches MAXITER, point is considered to be inside the set
struct d_complex z;
int iter;
double temp;
z = c;
for (iter = 0; iter < MAXITER; iter++)
{
temp = (z.r * z.r) - (z.i * z.i) + c.r;
z.i = z.r * z.i * 2 + c.i;
z.r = temp;
if ((z.r * z.r + z.i * z.i) > 4.0)
{
// MPI_Send( &numoutside, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
return numoutside;
break;
}
}
return 0;
}
The expectation is to get around 1.510659 when running the code with NPOINTS = 1000 or 2000 and with 2 or 4 processors.
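For reference, the usual way to implement the "reduction for numoutside" mentioned above is to let every rank (including rank 0) count the outside points of its own strided rows and combine the counts with a single MPI_Reduce, instead of sending one message per point. This is a minimal sketch, assuming the NPOINTS, eps, c and testpoint() defined above, and assuming testpoint() returns nonzero for points that escape:

/* Sketch: each rank counts the outside points of its strided rows,
   then one MPI_Reduce combines the counts on rank 0. */
int local_outside = 0, total_outside = 0;
for (i = myrank; i < NPOINTS; i += mysize)
{
    for (j = 0; j < NPOINTS; j++)
    {
        c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps;
        c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps;
        if (testpoint(c) != 0)   /* point escaped, so it is outside the set */
            local_outside++;
    }
}
MPI_Reduce(&local_outside, &total_outside, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (myrank == 0)
{
    area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - total_outside) / (double)(NPOINTS * NPOINTS);
    error = area / (double)NPOINTS;
    printf("Area of Mandelbrot set = %12.8f +/- %12.8f\n", area, error);
}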

Related

Why is MPI_Bcast not working when cores >= 12 for a given input?

I have the current code working for fewer than 12 cores. Using prints, I found that the code works until MPI_Bcast(&global_mean, ...): prints placed before that call are shown, and prints after it are not.
Note: the custom MPI type (mpi_pixel_type) has been created and tested.
int main (int argc, char ** argv)
{
int pid, n_processors;
const int ROOT = 0;
int xsize, ysize, colmax;
pixel *src = (pixel*) malloc(sizeof(pixel) * MAX_PIXELS);
// Scatter receiver vector
pixel *receive_buffer;
int send_count, partial_sum, total_sum, global_mean, nump, remainder;
double global_time = 0.0;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &n_processors);
if(pid == ROOT){
/* Take care of the arguments */
if (argc != 3)
{
fprintf(stderr, "Usage: %s infile outfile\n", argv[0]);
exit(1);
}
/* Read file */
if(read_ppm (argv[1], &xsize, &ysize, &colmax, (char *) src) != 0)
exit(1);
if (colmax > 255)
{
fprintf(stderr, "Too large maximum color-component value\n");
exit(1);
}
printf("Has read the image, calling filter\n");
send_count = (xsize * ysize)/n_processors;
nump = xsize * ysize;
}
double start_time = MPI_Wtime();
MPI_Bcast(&send_count, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
// Initialize receive_buffer
receive_buffer = (pixel*)malloc(send_count * sizeof(pixel));
// 1. Scatter src array through different proccessors
MPI_Scatter(src, send_count, mpi_pixel_type, receive_buffer, send_count, mpi_pixel_type, ROOT, MPI_COMM_WORLD);
// 2. Do partial sums
int i;
partial_sum = 0;
for(i = 0; i < send_count; i++){
partial_sum += (unsigned int)receive_buffer[i].r + (unsigned int)receive_buffer[i].g + (unsigned int)receive_buffer[i].b;
}
MPI_Reduce(&partial_sum, &total_sum, 1, MPI_INT, MPI_SUM, ROOT, MPI_COMM_WORLD);
// Calculate missing pixels
if(pid == ROOT){
remainder = nump % n_processors;
for(int i = nump - remainder; i < nump; i++)
total_sum += (unsigned int)receive_buffer[i].r + (unsigned int)receive_buffer[i].g + (unsigned int)receive_buffer[i].b;
}
// 3. Calculate mean
if(pid == ROOT)
global_mean = total_sum/nump;
MPI_Bcast(&global_mean, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
// 4. Apply algorithm
MPI_Scatter(src, send_count, mpi_pixel_type, receive_buffer, send_count, mpi_pixel_type, ROOT, MPI_COMM_WORLD);
unsigned int psum;
for(i = 0; i < send_count; i++){
psum = (unsigned int)receive_buffer[i].r + (unsigned int)receive_buffer[i].g + (uint)receive_buffer[i].b;
if(global_mean > psum)
receive_buffer[i].r = receive_buffer[i].g = receive_buffer[i].b = 0;
else
receive_buffer[i].r = receive_buffer[i].g = receive_buffer[i].b = 255;
}
// 5. Gather partial results
MPI_Gather(receive_buffer, send_count, mpi_pixel_type, src, send_count, mpi_pixel_type, ROOT, MPI_COMM_WORLD);
if(pid == ROOT){
// printf("Reamainder: %d\n", remainder);
for(i = nump - remainder; i < nump; i++){
psum = (unsigned int)src[i].r + (unsigned int)src[i].g + (uint)src[i].b;
if(global_mean > psum)
src[i].r = src[i].g = src[i].b = 0;
else
src[i].r = src[i].g = src[i].b = 255;
}
}
double end_time = MPI_Wtime();
global_time += end_time - start_time;
if(pid == ROOT){
printf("Filtering took: %g secs\n", global_time) ;
/* Write result */
printf("Writing output file\n\n");
if (write_ppm(argv[2], xsize, ysize, (char *)src) != 0)
exit(1);
}
MPI_Finalize();
return 0;
}
With fewer than 12 cores it works fine, but with 12 or more cores I get:
= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
= PID 56361 RUNNING AT sigma.nsc.liu.se
= EXIT CODE: 11
= CLEANING UP REMAINING PROCESSES
= YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
Where did I go wrong in my code? Why does it stop working only when a certain number of cores is reached?
Why does it stop working...
Likely because your process is attempting to access memory it does not own, leading to the error: BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
Here you are creating memory for send_count pixels in receive_buffer:
receive_buffer = (pixel*)malloc(send_count * sizeof(pixel));
Then here, you are indexing one past the memory you have created:
for(i = 0; i <= send_count; i++){
^^
psum = (unsigned int)receive_buffer[i].r + (unsigned int)receive_buffer[i].g + (uint)receive_buffer[i].b;
if(global_mean > psum)
Change to
for(i = 0; i < send_count; i++){
^
The error was caused because I was computing with receive_buffer after the MPI_Reduce when I should have used src. :)
Thanks all for your suggestions.
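For completeness, here is a minimal sketch of that corrected remainder step, using the same src, nump, remainder and n_processors as in the code above (the root owns all of src, whereas receive_buffer only holds send_count pixels):

if(pid == ROOT){
    remainder = nump % n_processors;
    // Add the leftover pixels that the scatter never distributed, reading from src.
    for(int i = nump - remainder; i < nump; i++)
        total_sum += (unsigned int)src[i].r + (unsigned int)src[i].g + (unsigned int)src[i].b;
}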

Cuda program for matrix batch multiplication

I am a novice at CUDA programming and I am trying to reproduce the functionality of cublasSgemmBatched, that is, to perform matrix-matrix multiplication over a batch of matrices. I tried to implement my idea in the following code.
#include <stdio.h>
__global__ void BatchMulCUDA(float* array1, float* array2, int narray1, int dim, float* result)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx < narray1 * dim)
{
float temp = 0;
int index = tx / dim;
#pragma
for (int i = 0; i < dim; i++)
{
temp += array1[tx * dim + i] * array2[index * dim + i];
}
result[tx] = temp;
}
}
void BatchMulGPU(float* array1, float* array2, int narray1, int dim, float* result)
{
dim3 threads(1024, 1);
dim3 grid(narray1 / 1024 + 1, 1);
int threadsPerBlock = threads.x * threads.y;
int blocksPerGrid = grid.x * grid.y;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
BatchMulCUDA<<<grid, threads>>>(array1, array2, narray1, dim, result);
}
However, strangely, I found that I get the right output only up to index 19730. From element 19730 onward, the GPU output is always 0. I do not know what the problem is. The CPU version of my code and the test function are below. Is there some hardware limitation that I am not aware of?
#include "kernel.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
#include <math.h>
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double)tp.tv_usec*1e-6);
}
void BatchMulCPU(float* array1, float* array2, int narray1, int dim, float* result)
{
for (int i = 0; i < narray1 * dim; i++)
{
float temp = 0;
int index = i / dim;
for (int j = 0; j < dim; j++)
{
temp += array1[i * dim + j] * array2[index * dim + j];
}
result[i] = temp;
}
}
int main(int argc, char** argv)
{
int narray1 = 6980;
int dim = 4;
float* array1 = new float[narray1 * dim * dim];
float* array2 = new float[narray1 * dim];
float* resultGPU = new float[narray1 * dim];
float* resultCPU = new float[narray1 * dim];
float* d_array1;
float* d_array2;
float* d_result;
for (int i = 0; i < narray1 * dim * dim; i++)
{
array1[i] = static_cast<float> (rand() / (static_cast<float> (RAND_MAX / 10)));
}
for (int i = 0; i < narray1 * dim; i++)
{
array2[i] = static_cast<float> (rand() / (static_cast<float> (RAND_MAX / 10)));
}
cudaError_t err;
double iStart = cpuSecond();
err = cudaMalloc((void**)&d_array1, narray1 * dim * dim * sizeof(float));
err = cudaMalloc((void**)&d_array2, narray1 * dim * sizeof(float));
err = cudaMalloc((void**)&d_result, narray1 * dim * sizeof(float));
err = cudaMemcpy(d_array1, array1, narray1 * dim * dim * sizeof(float), cudaMemcpyHostToDevice);
err = cudaMemcpy(d_array2, array2, narray1 * dim * sizeof(float), cudaMemcpyHostToDevice);
BatchMulGPU(d_array1, d_array2, narray1, dim, d_result);
err = cudaMemcpy(resultGPU, d_result, narray1 * dim * sizeof(float), cudaMemcpyDeviceToHost);
double iElaps = cpuSecond() - iStart;
printf("Total GPU computation time is %lf \n" , iElaps);
iStart = cpuSecond();
BatchMulCPU(array1, array2, narray1, dim, resultCPU);
iElaps = cpuSecond() - iStart;
printf("Total CPU computation time is %lf \n" , iElaps);
float error = 0;
float temp = 0;
for (long i = 0; i < narray1 * dim; i++)
{
// temp = abs(resultCPU[i] - resultGPU[i]);
// if (temp > 0.5)
// {
// std::cout << i << std::endl;
// }
error += abs(resultCPU[i] - resultGPU[i]);
}
printf("Error is %f \n", error);
// for (int i = 19730; i < 19750; i++)
// {
// std::cout << "GPU " << resultGPU[i] << std::endl;
// std::cout << "CPU " << resultCPU[i] << std::endl;
// }
cudaFree(d_array1);
cudaFree(d_array2);
cudaFree(d_result);
return 0;
}
Apart from the possibility of a WDDM TDR timeout as discussed in the comments, the code has an error.
It's evident that the kernel design expects a total grid size (total number of threads) equal to or greater than the number of arrays times the side dimension:
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx < narray1 * dim)
i.e., narray1 * dim threads are needed.
However, the number of threads being launched is based only on narray1:
dim3 threads(1024, 1);
dim3 grid(narray1 / 1024 + 1, 1);
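For example, with narray1 = 6980 this launches 6980/1024 + 1 = 7 blocks of 1024 threads, i.e. 7168 threads in total, while narray1 * dim = 27920 threads are needed to cover every output element.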
If we change the grid computation above to:
dim3 grid((narray1*dim) / 1024 + 1, 1);
this code design error will be addressed.
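Folded back into the helper from the question, the corrected launch would look roughly like this:

void BatchMulGPU(float* array1, float* array2, int narray1, int dim, float* result)
{
    // Launch enough threads to cover all narray1 * dim output elements.
    dim3 threads(1024, 1);
    dim3 grid((narray1 * dim) / 1024 + 1, 1);
    BatchMulCUDA<<<grid, threads>>>(array1, array2, narray1, dim, result);
}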
The reason the code works correctly for a small number of matrices (anything up to 256) is the rounding-up effect in the grid sizing: the grid always contains at least 1024 threads, which is enough to cover narray1 * dim elements whenever narray1 <= 256 and dim = 4.
As an aside, this code is not functionally similar to cublasSgemmBatched from what I can see. I don't recognize this code as being any matrix multiplication (matrix dot product) that I am familiar with.

MPI_Gatherv doesn't work correctly

I wrote code that multiplies a matrix by a vector, using MPI. The matrix is distributed in chunks of rows, and the chunk sizes are not necessarily equal. The chunks themselves are computed correctly, but when I run the program I get a half-empty result vector, whereas I expected a full one. Please look at the attached code. I think the problem is in the MPI_Gatherv call.
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#define COLUMN 4
#define ROW 7
#define dp 100.0f
// Local start
#define chunk_low(commrank, commsize, nvert) \
((commrank) * (nvert) / (commsize))
// Local end
#define chunk_height(commrank, commsize, nvert) \
(chunk_low((commrank) + 1, commsize, nvert) - 1)
// Local size
#define chunk_size(commrank, commsize, nvert) \
(chunk_height(commrank, commsize, nvert) - \
chunk_low(commrank, commsize, nvert) + 1)
// Matrix initialization function
void init_matrix(int column, int row, float *matrix)
{
int j, i;
printf("\nMatrix\n");
for(i=0; i < row; i++){
for(j=0; j < column; j++){
matrix[i*column+j] = i * column + j; // (float)rand()/RAND_MAX * dp *2.0f - dp;
printf(" %f ", matrix[i * column + j]);
}
printf("\n");
}
printf("\n");
}
int main(int argc, char **argv)
{
int rank, size;
int i, j;
float *vm, *local_matrix, *result, *vector;
double time1, time2;
int *displs, *rcounts, *scounts;
vm = (float *)calloc(ROW * COLUMN, sizeof(float));
vector = malloc(COLUMN * sizeof(float));
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
/* Process 0 - master */
if (rank==0)
{
printf("\nNumbers of proccesses %d. \nElements in vector %d.\n", size, COLUMN);
/* Init vector vA */
init_matrix(COLUMN, ROW, vm);
for (i = 0; i < COLUMN; i++) {
vector[i] = (11 * 5) + (11 * i);
}
result = (float *)calloc(ROW, sizeof(float));
//Time begining calculating of programm
time1=MPI_Wtime();
}
/* End of work process 0 */
displs = (int *)malloc(sizeof(int) * size);
scounts = (int *)malloc(sizeof(int) * size);
rcounts = (int *)malloc(sizeof(int) * size);
for (i = 0; i < size; i++) {
displs[i] = chunk_low(i, size, ROW) * COLUMN; // Position initialization
rcounts[i] = scounts[i] = chunk_size(i, size, ROW) * COLUMN;
}
local_matrix = (float *)calloc(chunk_size(rank, size, ROW) * COLUMN, sizeof(float));
MPI_Bcast(vector, COLUMN, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Scatterv(vm, scounts, displs, MPI_FLOAT, local_matrix,
rcounts[rank], MPI_FLOAT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
int local_row = scounts[rank] / COLUMN;
float *local_result = (float *)calloc(local_row, sizeof(float));;
for(i = 0; i < local_row; i++) {
for (j = 0; j < COLUMN; j++) {
local_result[i] += local_matrix[i * COLUMN + j] * vector[j];
}
}
MPI_Gatherv(local_result, local_row, MPI_FLOAT, result, rcounts, displs, MPI_FLOAT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
/* Only master-process */
if (rank==0)
{
//Time ending programm
time2=MPI_Wtime();
printf("\nTime parallel calculation = %f s.\n",time2-time1);
for (i = 0; i < ROW; i++)
{
printf(" %f\n", result[i]);
}
}
// End work of master-process
/* Delete storage arrays of process */
free(displs);
free(scounts);
free(rcounts);
free(local_matrix);
MPI_Finalize();
return 0;
}
After running this code I expected:
484.000000
1628.000000
2772.000000
3916.000000
5060.000000
6204.000000
7348.000000
But I get this result:
484.000000
1628.000000
0.000000
0.000000
0.000000
0.000000
0.000000
The problem was in the displs (and counts) we passed to MPI_Gatherv: they are in units of matrix elements, while the gather collects only one result value per row.
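A minimal sketch of the fix (gcounts and gdispls are names introduced here for illustration; they reuse the chunk_low and chunk_size macros from the code, but in units of rows rather than matrix elements):

int *gdispls = (int *)malloc(sizeof(int) * size);
int *gcounts = (int *)malloc(sizeof(int) * size);
for (i = 0; i < size; i++) {
    gdispls[i] = chunk_low(i, size, ROW);   /* offset into result, in rows */
    gcounts[i] = chunk_size(i, size, ROW);  /* rows contributed by rank i */
}
MPI_Gatherv(local_result, local_row, MPI_FLOAT, result, gcounts, gdispls, MPI_FLOAT, 0, MPI_COMM_WORLD);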

Mandelbrot set using MPICH. Trying to learn but can't figure this bug out

I am trying to implement a master/slave scheme that computes the Mandelbrot set and writes it to a PPM file. This is what I have so far:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi/mpi.h>
int calculateMan (double , double ); //calculateMandelbrotSet
MPI_Status status;
struct Number {
double R;
double i;
} Z,C;
const int color;
int colorTemp; //color value
const int max = 1000; //max iteration value
const int ResHeight = 800; //Resolution
const int ResWidth = 800;
double CRMax = 1.5;
double CIMax = 2.0;
double CRMin = -2.5;
double CIMin = -2.0; //Constant values
double colorWidth;
double colorHeight;
int main (int argc, char** argv) {
int rank, size = 0;
int nodos, source, dest;
double startTime, endTime;
//Rank = current process ID
//Size = amount of processes
MPI_Init (&argc, &argv); // starts MPI
startTime = MPI_Wtime();
MPI_Comm_size (MPI_COMM_WORLD, &size); // get number of processes
MPI_Comm_rank (MPI_COMM_WORLD, &rank); // get current process
nodos = size - 1;
if (rank == 0) { // MASTER --------------------------------------
colorHeight = (CIMax - CIMin) / ResHeight;
colorWidth = (CRMax - CRMin) / ResWidth;
FILE *fp;
fp = fopen("Mandelbrot.ppm","w");
fprintf(fp,"P3\n %d\n %d\n %d\n",ResWidth,ResHeight,255); //Magic Number & Header
for (int row = 0; row < ResHeight; row++) {
C.i= CIMin + row*colorHeight;
for (int column = 0; column < ResWidth; column++) {
C.R = CRMin + column*colorWidth;
//data sends
for (dest = 1; dest <= nodos; dest++) {
MPI_Send(&C.R, sizeof(double), MPI_DOUBLE, dest, column, MPI_COMM_WORLD);
MPI_Send(&C.i, sizeof(double), MPI_DOUBLE, dest, column, MPI_COMM_WORLD);
}
}
}
for (int row = 0; row < ResHeight; row++) {
for (int column = 0; column < ResWidth; column++) {
//Recv and print
MPI_Recv(&colorTemp, sizeof(int), MPI_DOUBLE, source, 0, MPI_COMM_WORLD, &status);
fprintf(fp, "%d %d %d\n", colorTemp, 1,3);
}
}
fclose(fp);
} //------------------------- END MASTER
if (rank > 0) // START SLAVE --------------------------------------
{
for (int row = 0; row < ResHeight; row++) {
for (int column = 0; column < ResWidth; column++) {
MPI_Recv(&C.R, sizeof(double), MPI_DOUBLE, 0, column, MPI_COMM_WORLD, &status);
MPI_Recv(&C.i, sizeof(double), MPI_DOUBLE, 0, column, MPI_COMM_WORLD, &status);
colorTemp = calculateMan(C.R, C.i);
MPI_Send(&colorTemp, sizeof(int), MPI_INT, 0, 0, MPI_COMM_WORLD);
}
}
} // SLAVE END---------------------------------
endTime = MPI_Wtime(); //stop timer
MPI_Finalize(); //end MPI
printf("Time: %.6f\n", endTime-startTime);
exit(0); //end program
}
int calculateMan (double CReal, double CImaginary) {
int i = 0;
Z.R = 0.0;
Z.i = 0.0;
while (((i < max) && (Z.R*Z.R) + (Z.i * Z.i) < 4))
{
double temp = (Z.R * Z.R) - (Z.i * Z.i) + CReal;
Z.i = 2.0 * Z.R * Z.i + CImaginary;
Z.R = temp;
i++;
}
if (i == max)
return 0; //interior is black
else
return 255; //exterior white
}
I am trying to run my program but I cannot figure out why the receive-and-print loop at the master iterates forever. Also, can anyone have a look at the code and point out any other issues or things I should look out for, for future reference?
Thanks!
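As a side note on the posted code (an observation, not a diagnosis of the hang): MPI count arguments are element counts, not byte counts, and the master's MPI_Recv uses the uninitialized variable source and MPI_DOUBLE even though colorTemp is an int. A corrected receive of a single int from any worker could look like:

// Sketch: receive one int from any worker; count is in elements, not bytes.
MPI_Recv(&colorTemp, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);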

How to fix my Pthreads code about Mandelbrot set?

I have the following Pthreads code that calculates and creates a picture of the Mandelbrot set. My C code works just fine and prints the resulting picture nicely. With the code below I can compile and execute the program, but if I then try to view the resulting .ppm file in GIMP, it simply cannot open it. I guess I'm doing something wrong in my code. If someone could help me I would be glad.
// mandpthread.c
// to compile: gcc mandpthread.c -o mandpthread -lm -lrt -lpthread
// usage: ./mandpthread <no_of_iterations> <no_of_threads> > output.ppm
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <pthread.h>
typedef struct {
int r, g, b;
} rgb;
int NITERATIONS, NTHREADS;
rgb **m;
void color(rgb **m, int x, int y, int red, int green, int blue)
{
m[y][x].r = red;
m[y][x].g = green;
m[y][x].b = blue;
}
void mandelbrot(int tid)
{
int w = 600, h = 400, x, y;
// each iteration, it calculates: newz = oldz*oldz + p,
// where p is the current pixel, and oldz stars at the origin
double pr, pi; // real and imaginary part of the pixel p
double newRe, newIm, oldRe, oldIm; // real and imaginary parts of new and old z
double zoom = 1, moveX = -0.5, moveY = 0; // you can change these to zoom and change position
int start = tid * NITERATIONS/NTHREADS;
int end = (tid+1) * (NITERATIONS/NTHREADS) - 1;
//loop through every pixel
for(y = 0; y < h; y++) {
for(x = 0; x < w; x++) {
// calculate the initial real and imaginary part of z,
// based on the pixel location and zoom and position values
pr = 1.5 * (x - w / 2) / (0.5 * zoom * w) + moveX;
pi = (y - h / 2) / (0.5 * zoom * h) + moveY;
newRe = newIm = oldRe = oldIm = 0; //these should start at 0,0
// i will represent the number of iterations
int i;
// start the iteration process
for(i = start; i <= end; i++) {
// remember value of previous iteration
oldRe = newRe;
oldIm = newIm;
// the actual iteration, the real and imaginary part are calculated
newRe = oldRe * oldRe - oldIm * oldIm + pr;
newIm = 2 * oldRe * oldIm + pi;
// if the point is outside the circle with radius 2: stop
if((newRe * newRe + newIm * newIm) > 4) break;
}
if(i == NITERATIONS)
color(m, x, y, 0, 0, 0); // black
else
{
// normalized iteration count method for proper coloring
double z = sqrt(newRe * newRe + newIm * newIm);
int brightness = 256. * log2(1.75 + i - log2(log2(z))) / log2((double)NITERATIONS);
color(m, x, y, brightness, brightness, 255);
}
}
}
}
// worker function which will be passed to pthread_create function
void *worker(void *arg)
{
int tid = (int)arg;
mandelbrot(tid);
}
int main(int argc, char *argv[])
{
pthread_t* threads;
int i, j, rc;
if(argc != 3)
{
printf("Usage: %s <no_of_iterations> <no_of_threads> > output.ppm\n", argv[0]);
exit(1);
}
NITERATIONS = atoi(argv[1]);
NTHREADS = atoi(argv[2]);
threads = (pthread_t*)malloc(NTHREADS * sizeof(pthread_t));
m = malloc(400 * sizeof(rgb *));
for(i = 0; i < 400; i++)
m[i] = malloc(600 * sizeof(rgb));
// declaring the needed variables for calculating the running time
struct timespec begin, end;
double time_spent;
// starting the run time
clock_gettime(CLOCK_MONOTONIC, &begin);
printf("P6\n# AUTHOR: ET\n");
printf("%d %d\n255\n",600,400);
for(i = 0; i < NTHREADS; i++) {
rc = pthread_create(&threads[i], NULL, worker, (void *)i);
assert(rc == 0); // checking whether thread creating was successfull
}
for(i = 0; i < NTHREADS; i++) {
rc = pthread_join(threads[i], NULL);
assert(rc == 0); // checking whether thread join was successfull
}
// printing to file
for(i = 0; i < 400; i++) {
for(j = 0; j < 600; j++) {
fputc((char)m[i][j].r, stdout);
fputc((char)m[i][j].g, stdout);
fputc((char)m[i][j].b, stdout);
}
}
// ending the run time
clock_gettime(CLOCK_MONOTONIC, &end);
// calculating time spent during the calculation and printing it
time_spent = end.tv_sec - begin.tv_sec;
time_spent += (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
fprintf(stderr, "Elapsed time: %.2lf seconds.\n", time_spent);
for(i = 0; i < 400; i++)
free(m[i]);
free(m);
free(threads);
return 0;
}
The newest version of your code works for me with 100 iterations and 1 thread.
With two threads it fails, because the PPM file then has two headers, one from each thread.
If I delete one of the headers, the image loads, but the colours are off and there's a glitch in the image.
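Beyond the header issue, one thing visible in the posted code that can explain a multi-thread glitch is that mandelbrot() splits the iteration count NITERATIONS across threads while every thread still loops over every pixel of the shared array m, so the threads race on the same pixels and each uses a different iteration window. A sketch of a row-partitioned worker follows (an assumption about the intended decomposition, not the original design):

// Sketch: each thread computes a contiguous band of rows and always runs the
// full NITERATIONS per pixel, so no two threads touch the same element of m.
void mandelbrot_rows(int tid)
{
    int w = 600, h = 400, x, y, i;
    double pr, pi, newRe, newIm, oldRe, oldIm;
    double zoom = 1, moveX = -0.5, moveY = 0;
    int y_start = tid * h / NTHREADS;
    int y_end = (tid + 1) * h / NTHREADS;
    for(y = y_start; y < y_end; y++) {
        for(x = 0; x < w; x++) {
            pr = 1.5 * (x - w / 2) / (0.5 * zoom * w) + moveX;
            pi = (y - h / 2) / (0.5 * zoom * h) + moveY;
            newRe = newIm = oldRe = oldIm = 0;
            for(i = 0; i < NITERATIONS; i++) {
                oldRe = newRe;
                oldIm = newIm;
                newRe = oldRe * oldRe - oldIm * oldIm + pr;
                newIm = 2 * oldRe * oldIm + pi;
                if((newRe * newRe + newIm * newIm) > 4) break;
            }
            if(i == NITERATIONS)
                color(m, x, y, 0, 0, 0); // never escaped: black
            else {
                double z = sqrt(newRe * newRe + newIm * newIm);
                int brightness = 256. * log2(1.75 + i - log2(log2(z))) / log2((double)NITERATIONS);
                color(m, x, y, brightness, brightness, 255);
            }
        }
    }
}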
