Open MPI Waitall() Segmentation Fault - c

I'm new to MPI and I'm trying to develop a non-blocking program (with Isend and Irecv). The functionality is very basic (it's educational):
There is one process (rank 0) that is the master and receives messages from the slaves (ranks 1 to P). The master only receives results.
The slaves generate an array of N random numbers between 0 and R and then do some operations with those numbers (again, it's just for educational purposes; the operations don't make any sense).
This whole process (operations + sending the data) is done M times (this is just for comparing different implementations: blocking and non-blocking).
I get a segmentation fault in the master process when I call MPI_Waitall().
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <math.h>
#include <time.h>

#define M 1000 //Number of times
#define N 2000 //Quantity of random numbers
#define R 1000 //Max value of random numbers

double SumaDeRaices (double*);

int main(int argc, char* argv[]) {
    int yo;   /* rank of process */
    int p;    /* number of processes */
    int dest; /* rank of receiver */

    /* Start up MPI */
    MPI_Init(&argc, &argv);
    /* Find out process rank */
    MPI_Comm_rank(MPI_COMM_WORLD, &yo);
    /* Find out number of processes */
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    MPI_Request reqs[p-1];
    MPI_Status stats[p-1];

    if (yo == 0) {
        int i,j;
        double result;
        clock_t inicio,fin;
        inicio = clock();
        for(i = 0; i<M; i++){ //M times
            for(j = 1; j<p; j++){ //for every slave
                MPI_Irecv(&result, sizeof(double), MPI_DOUBLE, j, i, MPI_COMM_WORLD, &reqs[j-1]);
            }
            MPI_Waitall(p-1,reqs,stats); //wait for all slaves (SEG_FAULT)
        }
        fin = clock()-inicio;
        printf("Tiempo total de ejecucion %f segundos \n", ((double)fin)/CLOCKS_PER_SEC);
    }
    else {
        double* numAleatorios = (double*) malloc( sizeof(double) * ((double) N) ); //array with numbers
        int i,j;
        double resultado;
        dest=0;
        for(i=0; i<M; i++){ //again, M times
            for(j=0; j<N; j++){
                numAleatorios[j] = rand() % R ;
            }
            resultado = SumaDeRaices(numAleatorios);
            MPI_Isend(&resultado,sizeof(double), MPI_DOUBLE, dest, i, MPI_COMM_WORLD,&reqs[p-1]); //send result to master
        }
    }
    /* Shut down MPI */
    MPI_Finalize();
    exit(0);
} /* main */

double SumaDeRaices (double* valores){
    int i;
    double sumaTotal = 0.0;
    //Square roots of the values and their sum
    for(i=0; i<N; i++){
        sumaTotal = sqrt(valores[i]) + sumaTotal;
    }
    return sumaTotal;
}

There are several issues with your code. First and foremost, in your Isend you pass &resultado repeatedly without waiting until the previous non-blocking operation has finished. You are not allowed to reuse the buffer you pass to Isend before you have made sure the operation is complete.
Instead, I recommend using a normal Send: in contrast to a synchronous send (Ssend), a normal blocking send returns as soon as the buffer can be reused.
Second, there is no need to use message tags. I recommend just setting the tag to 0; in terms of performance it is simply faster.
Third, result shouldn't be a single variable but an array of size at least (p-1), so that each outstanding receive has its own buffer.
Fourth, I do not recommend allocating arrays such as the MPI_Request and MPI_Status arrays on the stack when the size is not a known small number. In this case the size is (p-1), so you are better off using malloc for these data structures.
Fifth, if you do not check the statuses, use MPI_STATUSES_IGNORE.
Also, instead of sizeof(double) you should specify the number of items (1); receiving sizeof(double) (typically 8) doubles into a single double overruns the buffer and is the most likely cause of the segmentation fault.
But of course the best version is just to use MPI_Gather.
Moreover, there is generally no reason not to run the computation on the root node as well.
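If you want to keep the non-blocking structure for comparison, a minimal sketch of the corrected master loop could look like the following (this assumes the slaves use a plain MPI_Send(&resultado, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD)):
/* Sketch only: one receive slot and one request per slave, count 1, tag 0. */
double *results = malloc((p - 1) * sizeof(double));
MPI_Request *reqs = malloc((p - 1) * sizeof(MPI_Request));

for (int i = 0; i < M; i++) {
    for (int j = 1; j < p; j++) {
        MPI_Irecv(&results[j - 1], 1, MPI_DOUBLE, j, 0, MPI_COMM_WORLD, &reqs[j - 1]);
    }
    MPI_Waitall(p - 1, reqs, MPI_STATUSES_IGNORE); //safe: every request has its own buffer
}

free(results);
free(reqs);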
Here is a slightly rewritten example using MPI_Gather:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <math.h>
#include <time.h>

#define M 1000 //Number of times
#define N 2000 //Quantity of random numbers
#define R 1000 //Max value of random numbers

double SumaDeRaices (double* valores)
{
    int i;
    double sumaTotal = 0.0;
    //Square roots of the values and their sum
    for(i=0; i<N; i++) {
        sumaTotal = sqrt(valores[i]) + sumaTotal;
    }
    return sumaTotal;
}

int main(int argc, char* argv[]) {
    int yo; /* rank of process */
    int p;  /* number of processes */

    /* Start up MPI */
    MPI_Init(&argc, &argv);
    /* Find out process rank */
    MPI_Comm_rank(MPI_COMM_WORLD, &yo);
    /* Find out number of processes */
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    double *result;
    clock_t inicio, fin;
    double *numAleatorios;

    if (yo == 0) {
        inicio = clock();
    }

    numAleatorios = (double*) malloc(sizeof(double) * N); //array with numbers
    result = (double *) malloc(sizeof(double) * p);

    for(int i = 0; i<M; i++){ //M times
        for(int j=0; j<N; j++) {
            numAleatorios[j] = rand() % R ;
        }
        double local_result = SumaDeRaices(numAleatorios);
        MPI_Gather(&local_result, 1, MPI_DOUBLE, result, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); //send result to master
    }

    if (yo == 0) {
        fin = clock()-inicio;
        printf("Tiempo total de ejecucion %f segundos \n", ((double)fin)/CLOCKS_PER_SEC);
    }

    free(numAleatorios);
    free(result);

    /* Shut down MPI */
    MPI_Finalize();
} /* main */

Related

Not quite understanding MPI

I am attempting to make a program that will find the value of PI using MPI.
Currently I can find the sum this way:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NUMSTEPS 1000000

int main() {
    int i;
    double x, pi, sum = 0.0;
    struct timespec start, end;

    clock_gettime(CLOCK_MONOTONIC, &start);

    double step = 1.0/(double) NUMSTEPS;
    x = 0.5 * step;
    for (i=0; i<= NUMSTEPS; i++){
        x+=step;
        sum += 4.0/(1.0+x*x);
    }
    pi = step * sum;

    clock_gettime(CLOCK_MONOTONIC, &end);
    u_int64_t diff = 1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;

    printf("PI is %.20f\n",pi);
    printf("elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
    return 0;
}
But this does not use MPI.
So I have tried to make my own in MPI. My logic is:
Split the 1000000 into equal parts based on how many processors I have
Calculate the values for each range
Send the calculated value back to the master and then divide by the number of processors. I would like to keep the main thread free and not do any work. Similar to a master-slave system.
Here's what I have currently. This doesn't seem to be working, and the send/receive calls give errors about incompatible variable types for receive and send.
#include <mpi.h>
#include <stdio.h>
#include <string.h>

#define NUMSTEPS 1000000

int main(int argc, char** argv) {
    int comm_sz; //number of processes
    int my_rank; //my process rank

    // Initialize the MPI environment
    MPI_Init(NULL, NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    // Get the name of the processor
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);

    // Slaves
    if (my_rank != 0) {
        // Process math then send
        int i;
        double x, pi, sum = 0.0;
        double step = 1.0/(double) NUMSTEPS;
        x = 0.5 * step;

        // Find the start and end for the number
        int processors = comm_sz - 1;
        int thread_multi = NUMSTEPS / processors;
        int start = my_rank * thread_multi;
        if((my_rank - 1) != 0){
            start += 1;
        }
        int end = start + thread_multi ;
        for (i=start; i <= end; i++){
            x+=step;
            sum += 4.0 / (1.0 + x * x);
        }
        pi = step * sum;
        MPI_Send(pi, 1.0, MPI_DOUBLE 1, 0, MPI_COMM_WORLD);
    // Master
    } else {
        // Things in here only get called once.
        double pi = 0.0;
        double total = 0.0;
        for (int q = 1; q < comm_sz; q++) {
            MPI_Recv(pi, 1, MPI_DOUBLE, q, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            total += pi;
            pi = 0.0;
        }
        // Take the added totals and divide by amount of processors that processed, to get the average
        double finished = total / (comm_sz - 1);
        // Print sum here
        printf("Pi Is: %d", finished);
    }
    // Finalize the MPI environment.
    MPI_Finalize();
}
I've currently spent around 3 hours working on this. Never used MPI. Any help would be greatly appreciated.
Try compiling with more compiler warnings enabled and try to fix them; for instance, -Wall -Wextra should give you excellent clues about what the issues are.
According to the MPI_Send documentation, the first argument is a pointer, so you seem to be ignoring an automatic "conversion to pointer" error. You have the same issue in the MPI_Recv() call.
You can try passing pi as &pi in MPI_Recv and MPI_Send and check whether that fixes the error.
As a side note, you can declare dummy variables such as pi as local variables inside the master loop to avoid side effects:
for (int q = 1; q < comm_sz; q++) {
    double pi = 0;
    MPI_Recv(&pi, 1, MPI_DOUBLE, q, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    total += pi;
}
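On the send side, the corrected call would then look something like this (assuming, as in your receive loop, that the master is rank 0 and tag 0 is used):
/* Pass the address of pi, an integer count of 1, and separate, comma-delimited type/destination/tag arguments. */
MPI_Send(&pi, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);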

MPI_Scatter is receiving wrong values

My goal is to take an array of 6 integers and split them among 3 processes. However, the numbers in receiveBuffer are not correct. I don't know why the three processes don't contain integers from the original array.
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include "mpi.h"

#define ARRAY_SIZE 6

// simple print array method
void printArray(int arr[], int size)
{
    int i;
    for (i=0; i < size; i++)
        printf("%d ", arr[i]);
    printf("\n");
}

int main (int argc, char *argv[])
{
    srand(time(NULL));
    int array[ARRAY_SIZE];
    int rank, numNodes;

    // fill array with random numbers and print
    for(int i = 0; i < ARRAY_SIZE; i++)
        array[i] = rand();
    printArray(array, ARRAY_SIZE);

    MPI_Init( &argc, &argv );
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numNodes);

    int receiveBuffer[ARRAY_SIZE/numNodes];

    if(rank == 0)
    {
        MPI_Scatter(array, ARRAY_SIZE/numNodes, MPI_INT, &receiveBuffer, 0, MPI_INT, rank, MPI_COMM_WORLD);
    }

    printf("ID: %d with %d items.\n", rank, ARRAY_SIZE/numNodes);
    printArray(receiveBuffer, ARRAY_SIZE/numNodes);

    MPI_Finalize();
    return 0;
}
Additionally, why is the original array printing for each process? Doesn't parallelization begin after INIT?
There are two issues with your usage of MPI_Scatter():
MPI_Scatter() is a collective operation and hence has to be invoked by all the ranks of the communicator (not only by rank zero);
since you use MPI_INT for both the send and the receive datatype, the send and receive counts should be equal (i.e. use ARRAY_SIZE/numNodes instead of 0).
The MPI standard does not specify what happens before MPI_Init(), and it is very common that mpirun spawns all the tasks, so they all execute the code that comes before MPI_Init(). That is why MPI_Init() is generally invoked at the very beginning of an MPI program.
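A corrected call, executed unconditionally by every rank, might look like this (receiveBuffer is an array, so it can be passed directly):
/* Every rank takes part in the collective; send and receive counts match. */
MPI_Scatter(array, ARRAY_SIZE/numNodes, MPI_INT,
            receiveBuffer, ARRAY_SIZE/numNodes, MPI_INT,
            0, MPI_COMM_WORLD);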

Efficient usage of MPI_Allreduce

On every processor I have a list, range, of numbers. I want to determine the maximum of every row across these range lists.
The first four lists are the range lists of the processors P0-P3. The red list contains the maximum values of each row, which every processor gets after MPI_Allreduce.
Here is a working version of my code:
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <mpi.h>

//#define KEY_MAX 100

typedef struct{
    int myrank;
    int numprocs;
    int *range;
} SubDomainKeyTree;

void compRange(SubDomainKeyTree *s, int myrank, int numprocs){
    s->myrank = myrank;
    s->numprocs = numprocs;
    // Allocate memory for (numprocs+1) ranges
    s->range = malloc((numprocs+1) * sizeof(int));
    // Compute range values
    for(int p=0; p<=numprocs; p++){
        s->range[p] = rand()%100;
    }
    for(int p=0; p<s->numprocs; p++){
        if(s->myrank == p){
            for(int k=0; k<=s->numprocs; k++){
                printf("Processor %d: %d random number is %d\n", p, k, s->range[k]);
            }
            printf("\n");
        }
    }
}

void compDynRange(SubDomainKeyTree *s){
    int rangeForAll[s->numprocs+1];
    //////////////////////////////////
    // This is not really efficient //
    //////////////////////////////////
    for(int r=0; r<=s->numprocs; r++){
        MPI_Allreduce(&s->range[r], &rangeForAll[r], 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    }
    for(int p=0; p<s->numprocs; p++){
        if(s->myrank == p){
            for(int k=0; k<=s->numprocs; k++){
                s->range[k] = rangeForAll[k];
                printf("Processor %d: %d random number after MPI_Allreduce is %d\n", p, k, s->range[k]);
            }
            printf("\n");
        }
    }
}

int main(int argc, char **argv){
    int nameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];
    int myrank;   // Rank of processor
    int numprocs; // Number of processes

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Get_processor_name(processorName, &nameLen);
    MPI_Status status;

    time_t t;
    srand((unsigned)time(NULL)+myrank*numprocs+nameLen);

    SubDomainKeyTree s;
    compRange(&s, myrank, numprocs);
    compDynRange(&s);

    MPI_Finalize();
    return 0;
}
I use a for-loop, which seems highly inefficient to me. Here I compute the maximum value of every row of all lists one after the other.
But can I use MPI_Allreduce without that for-loop?
I already tried the following instead of the for-loop, which does not work:
MPI_Allreduce(&s->range, &rangeForAll, s->numprocs+1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
Can someone give me a hint how I can do that?
As already hinted in a comment, the error in your code was that instead of passing the arrays containing your send and receive buffers, you were passing pointers to them. I imagine this error simply came from changing the single element used initially (like &s->range[r]), which was perfectly correct, into the full array by just removing the indexed access (i.e. &s->range), which is wrong.
So, as explained, using:
MPI_Allreduce(s->range, rangeForAll, s->numprocs+1, MPI_INT, MPI_MAX, MPI_COMM_WORLD)
does the trick. However, since you want the results to end up in the s->range arrays rather than in the temporary rangeForAll ones, you are better off not defining the latter at all, and instead using the MPI_IN_PLACE keyword as the sending parameter and s->range as the receiving one. The call becomes:
MPI_Allreduce(MPI_IN_PLACE, s->range, s->numprocs+1, MPI_INT, MPI_MAX, MPI_COMM_WORLD)
and s->range acts as both the sending and the receiving buffer. The final results will therefore all be in the s->range buffers after the call, sparing you the explicit copy.
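Putting this together, the temporary array and the copy loop disappear entirely; a minimal sketch of the simplified function (the printing loop is left out for brevity) could be:
void compDynRange(SubDomainKeyTree *s){
    // In-place reduction: s->range is both send and receive buffer,
    // so after the call every rank holds the element-wise maxima.
    MPI_Allreduce(MPI_IN_PLACE, s->range, s->numprocs+1,
                  MPI_INT, MPI_MAX, MPI_COMM_WORLD);
}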

Trying to make FFTW3 MPI work, getting zeros

I am trying to make the 2D MPI FFTW example from http://www.fftw.org/doc/2d-MPI-example.html#g_t2d-MPI-example work.
The example code is not complete, so I had to write some extra lines to make it compile and test it (the code is below).
For some reason, the output of the in-place FFT is just zeros as far as I can tell.
The output after transform is simply 0 0 (all zeros).
Am I using the MPI FFTW in the wrong way? The example code is simple enough.
// compile with: mpicc simple_mpi_example.c -Wl,-rpath=/usr/local/lib -lfftw3_mpi -lfftw3 -o simple_mpi_example
#include <fftw3-mpi.h>

int main(int argc, char **argv){
    const ptrdiff_t N0 = 1000, N1 = 1000;
    fftw_plan plan;
    fftw_complex *data; //local data of course
    ptrdiff_t alloc_local, local_n0, local_0_start, i, j;

    MPI_Init(&argc, &argv);
    fftw_mpi_init();

    /* get local data size and allocate */
    alloc_local = fftw_mpi_local_size_2d(N0, N1, MPI_COMM_WORLD,
                                         &local_n0, &local_0_start);
    data = (fftw_complex *) fftw_malloc(sizeof(fftw_complex) * alloc_local);

    MPI_Barrier(MPI_COMM_WORLD);
    printf("%i %i\n", local_0_start, local_n0);
    MPI_Barrier(MPI_COMM_WORLD);

    /* create plan for forward DFT */
    plan = fftw_mpi_plan_dft_2d(N0, N1, data, data, MPI_COMM_WORLD,
                                FFTW_FORWARD, FFTW_ESTIMATE);

    /* initialize data to some function my_function(x,y) */
    for (i = 0; i < local_n0; ++i) for (j = 0; j < N1; ++j){
        data[i*N1 + j][0] = local_0_start + i;
        data[i*N1 + j][1] = i;
    }

    MPI_Barrier(MPI_COMM_WORLD);
    printf("%f %f\n", data[10*N1 + 10][0], data[10*N1 + 10][1]);
    MPI_Barrier(MPI_COMM_WORLD);

    /* compute transforms, in-place, as many times as desired */
    fftw_execute(plan);

    printf("%f %f\n", data[10*N1 + 10][0], data[10*N1 + 10][1]);

    fftw_destroy_plan(plan);
    fftw_free(data);
    MPI_Finalize();
    printf("finalize\n");
    return 0;
}

MPI Segmentation fault with MPI_Scatter using C

I am new to this area and use Open MPI and C. I am trying to find out why my code leads to a segmentation fault. I have already read a lot about MPI but have not found any help; it has already taken me hours, so I decided to ask here.
I get the expected result from my code, but I also get an error message every time.
Is it right how I use MPI_Scatter in my case?
Here is my simple code:
#include <stdio.h>
#include "mpi.h"
#include <stdlib.h>

const int MASTER_RANK = 0;
#define DIM 3

int main(int argc, char* argv[])
{
    int numProc, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int n = 9;
    double *m;
    double *m_client;
    m_client = (double *)malloc(3);

    if(rank == MASTER_RANK)
    {
        m = (double* )malloc(n);
        for(int i=0; i<n; i++)
        {
            m[i] = (double)i+1.0;
        }
    }

    MPI_Scatter(m, 3, MPI_DOUBLE, m_client, 3, MPI_DOUBLE, MASTER_RANK, MPI_COMM_WORLD);

    printf("Process %d:\n", rank);
    for(int i=0; i < 3; i++)
    {
        printf(" (%lf", m_client[i]);
        m_client[i] += 1000*rank;
        printf(" -> %lf)", m_client[i]);
        printf("\n");
    }
    printf( "\n" );

    MPI_Gather(m_client, 3, MPI_DOUBLE, m, 3, MPI_DOUBLE, MASTER_RANK, MPI_COMM_WORLD);

    if(rank == MASTER_RANK)
    {
        printf("Master: Received= \n");
        for(int i=0; i<numProc; i++)
        {
            for(int j=0; j < 3; j++)
            {
                int idx = i*3 + j;
                printf("%lf ", m[idx]);
            }
            printf("from Process %d\n", i);
        }
    }

    free(m);
    free(m_client);

    MPI_Finalize();
    exit(0);
}
I build my MPI file by using mpicc mpifile.c -o mpifile and run it with mpirun -np 3 ./mpifile. I use 3 processes.
The error I get is:
[Samuel-Z97-HD3:14361] *** Process received signal ***
[Samuel-Z97-HD3:14361] Signal: Segmentation fault (11)
[Samuel-Z97-HD3:14361] Signal code: (128)
[Samuel-Z97-HD3:14361] Failing at address: (nil)
I am using Ubuntu and vim / Geany.
Your code has two problems.
Both your malloc() calls have wrong sizes. You should allocate a number of bytes, not a number of doubles; e.g. instead of calling malloc(3), call malloc(3*sizeof(double)).
The other problem is that your variable m should be initialized to NULL. Alternatively, you could surround free(m) with if(rank == MASTER_RANK). As it is, a non-master process calls free(m) while m is uninitialized and could contain an arbitrary value.
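A minimal sketch of the corrected declarations and allocations (the rest of the program stays as it is):
double *m = NULL;                              // NULL so free(m) is safe on non-master ranks
double *m_client = malloc(3 * sizeof(double)); // allocate bytes, not an element count

if(rank == MASTER_RANK)
{
    m = malloc(n * sizeof(double));
    for(int i=0; i<n; i++)
    {
        m[i] = (double)i+1.0;
    }
}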
