MPI C: mpirun noticed that process rank 0 exited on signal 11

I have an MPI program that solves the metric traveling salesman problem.
When I run it on Windows, it works as expected and prints the shortest possible path.
When I run it on Linux, I get a message saying that mpirun noticed that process rank 0 exited on signal 11.
When I searched for this problem on Stack Overflow, I saw that it often occurs when passing wrong arguments to MPI's send/receive functions, but I went over my code and the arguments seem fine.
How else can I track down the error?
If it helps, here are the two code files:
main.c :
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// forward declaration of tsp_main
int tsp_main(int citiesNum, int xCoord[], int yCoord[], int shortestPath[]);
int main(int argc, char** argv)
{
//int citiesNum = 18; //set a lower number for testing
int citiesNum = 10;
int xCoord[] = {1, 12, 13, 5, 5, 10, 5, 6, 7, 8, 9, 4, 11, 14, 4,8,4,6};
int yCoord[] = {7, 2, 3, 3, 5, 6, 7, 8, 9, 4, 11, 12, 13, 14, 5,1,7,33};
int* shortestPath = (int*)malloc(citiesNum * sizeof(int));
int i, myRank, minPathLen;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
clock_t begin = clock();
minPathLen = tsp_main(citiesNum, xCoord, yCoord, shortestPath);
clock_t end = clock();
if (myRank == 0)
{
printf("Execution time: %g seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
printf("The shortest path, %d long, is:\n", minPathLen);
for (i = 0; i < citiesNum; i++)
{
// print the city (and its distance from the next city in the path)
printf("%d (%d) ", shortestPath[i],
abs(xCoord[shortestPath[i]] - xCoord[shortestPath[(i + 1) % citiesNum]]) +
abs(yCoord[shortestPath[i]] - yCoord[shortestPath[(i + 1) % citiesNum]]) );
}
printf("%d\n", shortestPath[0]);
}
MPI_Finalize ();
return 0;
}
and tsp.c :
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <assert.h>
#include <string.h>
#include <time.h>
//play with this value to change the workload.
#define WORK_LOAD (10)
#define getDistance(a,b) (DistanceArray[a][b])
enum defines {
MASTER_ID = 0, // master ID. set to 0 (better safe than sorry)
DISTRIBUTE_NEW_TASK, // when the master send a new task to a worker
ASK_FOR_TASK, // when a worker asks from the master a new task
KILL, // when the master notifies the worker to die
SEND_MINIMUM, // when a process sends its current minimum
SEND_PATH, // when a worker updates the master of his best path
};
// initializes the factorials array. the i cell will contain i!. for example, factorials[4] will contain 24.
void initializeFactorials(long long int* factorials, int citiesNum) {
int i;
factorials[0] = 1;
for (i = 1; i < citiesNum; ++i) {
factorials[i] = i * factorials[i-1];
}
}
// initializes the two dimensional distance array. Element k,l will contain the distance between city k and city l
void initializeDistanceArray(int** DistanceArray, int citiesNum, int xCoord[], int yCoord[]) {
int k,l;
for (k=0; k < citiesNum; ++k) {
DistanceArray[k] = (int*)malloc(sizeof(int)*citiesNum);
}
for (k=0; k < citiesNum; ++k) {
for (l=0; l < citiesNum; ++l) {
DistanceArray[k][l] = abs(xCoord[k] - xCoord[l]) + abs(yCoord[k] - yCoord[l]);
}
}
}
/* initializes the edge minimum array. Element i contains the minimum weight of i+1 edges between different cities.
For example, element 0 contains the minimal edge. Element 5 contains the total weight of 6 edges going out from different cities*/
void initializeEdgeMinimum(int* edgeMinimum, int** DistanceArray, int citiesNum) {
int k, l, sum=0;
for (k=0; k < citiesNum; ++k) {
edgeMinimum[k] = INT_MAX;
for (l=0; l < citiesNum; ++l) {
if (l == k) continue;
if (getDistance(k,l) < edgeMinimum[k]) edgeMinimum[k] = getDistance(k,l);
}
}
for (k=0; k < citiesNum-1; ++k) {
for (l=k+1; l < citiesNum; ++l) {
if (edgeMinimum[l]>edgeMinimum[k]){
int temp = edgeMinimum[k];
edgeMinimum[k] = edgeMinimum[l];
edgeMinimum[l] = temp;
}
}
}
for (k=citiesNum-1; k >= 0; --k) {
sum += edgeMinimum[k];
edgeMinimum[k] = sum;
}
}
/* takes an index of a path as an argument, and converts it according to the decision tree (as explained in the external documentation)
to a path (circle) between cities*/
void convertIndexToPath(long long int index, int citiesNum, int* decisionTree, int* options, int* path, long long int* factorials) {
int i, j, decision;
long long int fact;
for(i = 0; i < citiesNum; ++i) {
fact = factorials[citiesNum-(i+1)];
decision = (int)(index/fact);
decisionTree[i] = decision;
index -= fact*((long long int)decision);
}
for(i = 0; i < citiesNum; ++i) {
options[i] = i+1;
}
path[0] = 0;
for(i = 1; i < citiesNum; ++i) {
path[i] = options[decisionTree[i]];
for(j = decisionTree[i]; j < citiesNum-i-1; ++j) {
options[j] = options[j+1];
}
}
}
/* takes the decision tree of the last path (as explained in the external documentation) and converts it to a path
ASSUMPTION: can be used ONLY if the last path was NOT pruned */
void convertdDecisionToPath(long long int index, int citiesNum, int* decisionTree, int* options, int* path, long long int* factorials) {
int i, j;
for(i = citiesNum-2; i > 0; --i) {
decisionTree[i] = (decisionTree[i] + 1) % (citiesNum - i);
if (decisionTree[i] != 0) break;
}
for(i = 0; i < citiesNum; ++i) {
options[i] = i+1;
}
path[0] = 0;
for(i = 1; i < citiesNum; ++i) {
path[i] = options[decisionTree[i]];
for(j = decisionTree[i]; j < citiesNum-i-1; ++j) {
options[j] = options[j+1];
}
}
}
// returns one index before the next iteration of the index of the path that we need to explore right after pruning.
long long int getIndexAfterPrune(long long int index, int citiesVisitedInPath, int citiesNum, int* decisionTree, long long int* factorials) {
int decision, i;
long long int fact, nextIndex = 0, indexBackup = index;
for(i = 0; i < citiesNum; ++i) {
fact = factorials[citiesNum-(i+1)];
decision = (int)(index/fact);
decisionTree[i] = decision;
index -= fact*((long long int)decision);
}
for(i = citiesVisitedInPath + 1; i < citiesNum; ++i) {
decisionTree[i] = 0;
}
for(i = 0; i < citiesNum; ++i) {
nextIndex += (decisionTree[i] * factorials[citiesNum-1-i]);
}
nextIndex += factorials[citiesNum - citiesVisitedInPath];
return nextIndex-1;
}
// returns how many possibilities (paths) there are in a single chunk
long long int getChunkSize(int citiesNum, int workersNum, long long int* factorials) {
--citiesNum;
long long int allPossibilities = factorials[citiesNum];
// empirically setting the chunk size
long long int chunkSize = WORK_LOAD*(allPossibilities/factorials[citiesNum/2]) / (workersNum);
if (citiesNum <= 3 || chunkSize == 0) { //the job is small, and one worker can handle it
return allPossibilities;
}
return chunkSize;
}
// returns the number of chunks that we need to handle
long long int getNumberOfChunks(int citiesNum, long long int chunkSize, long long int* factorials) {
--citiesNum;
long long int allPossibilities = factorials[citiesNum];
int lastChunk = 0;
if (allPossibilities % chunkSize != 0) {
lastChunk = 1;
}
return (allPossibilities/chunkSize) + lastChunk;
}
// returns how many workers should work on the task.
int getNeededWorkers(long long int numberOfChunks, int workersNum) {
if (workersNum >= numberOfChunks) {
return (int)numberOfChunks;
}
return workersNum;
}
/*
Splits the problem into many sub problems and sends tasks to the workers.
each task contains the start index (the stop index is simply calculated from the chunk size) and the optimal price known so far.
The master also listens for updates of the optimal price.
when all the workers finish, the masters send them a request to update him with their optimal solution, and then decides what's the global optimum.
conventions: variables are in camelCase, consts are in ALL_CAPS, and two dimensional arrays are in PascalCasing
*/
int runMaster(int citiesNum, int xCoord[], int yCoord[], int shortestPath[], int processesNum) {
// Variables
int doneWorkers = 0, neededWorkers, gotAnswer, junk, currentMinimum = INT_MAX;
long long int chunkSize, indexToCheck = 0, bestPathIndex, numberOfChunks;
MPI_Status status1, status2, status3;
MPI_Request request1 = MPI_REQUEST_NULL, junkRequest = MPI_REQUEST_NULL;
// Arrays
int *decisionTree, *options;
long long int *factorials, *recieveBuffer, *sendBuffer;
// Dynamic Allocations
decisionTree = (int*)malloc(citiesNum * sizeof(int));
options = (int*)malloc(citiesNum * sizeof(int));
factorials = (long long int*)malloc(citiesNum * sizeof(long long int));
recieveBuffer = (long long int*)malloc(2 * sizeof(long long int));
sendBuffer = (long long int*)malloc(2 * sizeof(long long int));
initializeFactorials(factorials, citiesNum);
long long int lastIndex = factorials[citiesNum-1]-1;
chunkSize = getChunkSize(citiesNum, processesNum-1, factorials);
numberOfChunks = getNumberOfChunks(citiesNum, chunkSize, factorials);
neededWorkers = getNeededWorkers(numberOfChunks, processesNum-1);
while (doneWorkers < neededWorkers) {
//check if a worker wants a new task
MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status1);
gotAnswer = 1;
MPI_Iprobe(MPI_ANY_SOURCE, ASK_FOR_TASK, MPI_COMM_WORLD, &gotAnswer, &status1);
if (gotAnswer) {
MPI_Recv(&junk, 0, MPI_INT, MPI_ANY_SOURCE, ASK_FOR_TASK, MPI_COMM_WORLD, &status1); //blocking recieve since we need the request to complete so we'd know who should get the task
if (indexToCheck <= lastIndex) {
// the master sends the current minimum, and a new job to the worker
sendBuffer[0] = currentMinimum;
sendBuffer[1] = indexToCheck;
indexToCheck += chunkSize;
MPI_Irsend(sendBuffer, 2, MPI_LONG_LONG_INT, status1.MPI_SOURCE, DISTRIBUTE_NEW_TASK, MPI_COMM_WORLD, &request1); // we're guaranteed that the worker called IRecv and is ready to get a task. no need to block since we can continue doing our own calculations.
MPI_Request_free(&request1);
} else { // the master kills the worker
MPI_Irsend(&junk, 0, MPI_INT, status1.MPI_SOURCE, KILL, MPI_COMM_WORLD, &junkRequest); // we're guaranteed that the worker called IRecv and is ready to get a task. no need to block since we can continue doing our own calculations.
MPI_Request_free(&junkRequest);
}
continue;
}
gotAnswer = 1;
MPI_Iprobe(MPI_ANY_SOURCE, SEND_MINIMUM, MPI_COMM_WORLD, &gotAnswer, &status2);
if(gotAnswer) { // the master receives a minimal price from one of the workers and decides if it's the global minimum
MPI_Recv(recieveBuffer, 1, MPI_LONG_LONG_INT, MPI_ANY_SOURCE, SEND_MINIMUM, MPI_COMM_WORLD, &status2); // blocking, since we're going to use the recieve buffer
currentMinimum = (currentMinimum <= recieveBuffer[0]) ? currentMinimum : (int)recieveBuffer[0];
continue;
}
gotAnswer = 1;
MPI_Iprobe(MPI_ANY_SOURCE, SEND_PATH, MPI_COMM_WORLD, &gotAnswer, &status3);
if(gotAnswer) {
// the master receives a minimal path and price from one of the workers and decides if it's the global minimum
MPI_Recv(recieveBuffer, 2, MPI_LONG_LONG_INT, MPI_ANY_SOURCE, SEND_PATH, MPI_COMM_WORLD, &status3); // blocking, since we're going to use the recieve buffer
++doneWorkers;
if(recieveBuffer[0] <= currentMinimum){
currentMinimum = (int)recieveBuffer[0];
bestPathIndex = recieveBuffer[1];
}
}
} //while
free(factorials); free(decisionTree); free(options); free(recieveBuffer); free(sendBuffer);
convertIndexToPath(bestPathIndex, citiesNum, decisionTree, options, shortestPath, factorials);
return currentMinimum;
}
/*
gets tasks from the master and processes them until there are no more tasks to handle.
in each task, we go through all the possibilities in the current chunk, but skip paths that are heavier than the current known optimal path.
when we discover a new optimal path, we update the rest of the processes if necessary (first, we check if we got a new optimal weight from them).
conventions: variables are in camelCase, consts are in ALL_CAPS, and two dimensional arrays are in PascalCasing
*/
void runWorker(int citiesNum, int xCoord[], int yCoord[], int shortestPath[], int processesNum) {
//Variables
int sum = 0, gotAnswer = 1, PRUNE_FACTOR = citiesNum - 3, LAST_CITY = citiesNum-1, indexReachedInPath, pruned, sumUntilPruned = 0, IndexUntilPruned = 0, doneWorkers = 0, neededWorkers, junk, myCurrentMinimum = INT_MAX, othersCurrentMinimum = INT_MAX, pid, k;
long long int numberOfChunks, chunkSize, indexToCheck = 0, startIndex, stopIndex, i, bestPathIndex = -1, lastIndex;
MPI_Status status1, status2;
MPI_Request request1 = MPI_REQUEST_NULL, junkRequest = MPI_REQUEST_NULL;
//Arrays
int *decisionTree, *edgeMinimum, *myCurrentPath, *options, **DistanceArray;
long long int *factorials, *recieveBuffer, *sendBuffer;
// Dynamic Allocations
decisionTree = (int*)malloc(citiesNum * sizeof(int));
edgeMinimum = (int*)malloc(sizeof(int)*citiesNum);
myCurrentPath = (int*)malloc(citiesNum * sizeof(int));
options = (int*)malloc(citiesNum * sizeof(int));
factorials = (long long int*)malloc(citiesNum * sizeof(long long int));
recieveBuffer = (long long int*)malloc(2 * sizeof(long long int));
sendBuffer = (long long int*)malloc(2 * sizeof(long long int));
DistanceArray = (int**)malloc(sizeof(int*)*citiesNum);
initializeFactorials(factorials, citiesNum);
initializeDistanceArray(DistanceArray, citiesNum, xCoord, yCoord);
initializeEdgeMinimum(edgeMinimum, DistanceArray, citiesNum);
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
lastIndex = factorials[citiesNum-1]-1;
chunkSize = getChunkSize(citiesNum, processesNum-1, factorials);
numberOfChunks = getNumberOfChunks(citiesNum, chunkSize, factorials);
neededWorkers = getNeededWorkers(numberOfChunks, processesNum-1);
if (pid > neededWorkers){ //free memory and exit
for (k=0; k < citiesNum; ++k) {
free(DistanceArray[k]);
}
free(factorials); free(decisionTree); free(options); free(recieveBuffer); free(sendBuffer); free(edgeMinimum); free(DistanceArray); free(myCurrentPath);
return;
}
MPI_Irecv(recieveBuffer, 2, MPI_LONG_LONG_INT, MASTER_ID, MPI_ANY_TAG, MPI_COMM_WORLD, &request1); // getting ready to recieve a new task from the master. no need to block
MPI_Ssend(&junk, 0, MPI_INT, MASTER_ID, ASK_FOR_TASK, MPI_COMM_WORLD); // asking the master for a new task. synced & blocking, since we don't have anything else to do until we get a new task
while(1) {
MPI_Wait(&request1, &status1); //avoid busy-wait
if (status1.MPI_TAG == DISTRIBUTE_NEW_TASK) {
// the worker got a new job from the master. recieveBuffer[0] contains the server's currentMinimum. recieveBuffer[1] contains indexToCheck
othersCurrentMinimum = (recieveBuffer[0] < othersCurrentMinimum) ? (int)recieveBuffer[0] : othersCurrentMinimum;
startIndex = recieveBuffer[1];
stopIndex = (startIndex + chunkSize >= lastIndex) ? lastIndex + 1 : startIndex + chunkSize;
pruned = 1;
indexReachedInPath = 0;
sum = 0;
for(i = startIndex; i < stopIndex; ++i) {
if (pruned) { // calculate the current path from the index
convertIndexToPath(i, citiesNum, decisionTree, options, myCurrentPath, factorials);
} else { // calculate the current path from the last path (decision tree)
convertdDecisionToPath(i, citiesNum, decisionTree, options, myCurrentPath, factorials);
}
sum = 0;
indexReachedInPath = 0;
pruned = 0;
for(; indexReachedInPath < LAST_CITY; ++indexReachedInPath) {
sum += getDistance(myCurrentPath[indexReachedInPath], myCurrentPath[indexReachedInPath+1]);
if (indexReachedInPath < PRUNE_FACTOR && sum + edgeMinimum[indexReachedInPath] >= othersCurrentMinimum) {
//prune
pruned = 1;
sum -= getDistance(myCurrentPath[indexReachedInPath], myCurrentPath[indexReachedInPath+1]);
if (indexReachedInPath == 0) {
i = getIndexAfterPrune(i,1,citiesNum, decisionTree, factorials);
} else {
i = getIndexAfterPrune(i,indexReachedInPath,citiesNum, decisionTree, factorials);
}
break;
}
}
if(pruned) continue;
sum += getDistance(myCurrentPath[LAST_CITY], myCurrentPath[0]); //return from the last city to the first
if(sum < othersCurrentMinimum) {
myCurrentMinimum = sum;
bestPathIndex = i;
//check for a new global minimum
gotAnswer = 1;
MPI_Iprobe(MPI_ANY_SOURCE, SEND_MINIMUM, MPI_COMM_WORLD, &gotAnswer, &status2);
if(gotAnswer) {
MPI_Recv(recieveBuffer, 1, MPI_INT, MPI_ANY_TAG, SEND_MINIMUM, MPI_COMM_WORLD, &status2); // blocking, since we're going to use the recieve buffer
othersCurrentMinimum = (recieveBuffer[0] < othersCurrentMinimum) ? (int)recieveBuffer[0] : othersCurrentMinimum;
}
if (myCurrentMinimum < othersCurrentMinimum) {
othersCurrentMinimum = sum;
for (k = 0; k < processesNum; ++k) {
if (junk == pid) continue;
MPI_Issend(&myCurrentMinimum, 1, MPI_INT, k, SEND_MINIMUM, MPI_COMM_WORLD, &junkRequest); // sending everyone our minimum, copying it to their memory. obviously, no need to block.
}
}
}
}
//send my minimum to the master if it's the global minimum
//if (myCurrentMinimum <= othersCurrentMinimum) {
// MPI_Issend(&myCurrentMinimum, 1, MPI_INT, MASTER_ID, SEND_MINIMUM, MPI_COMM_WORLD, &junkRequest); // sending the master our minimum.
//}
// get a new task from the master
MPI_Irecv(recieveBuffer, 2, MPI_LONG_LONG_INT, MASTER_ID, MPI_ANY_TAG, MPI_COMM_WORLD, &request1); // getting ready to recieve a new task from the master. no need to block
MPI_Ssend(&junk, 0, MPI_INT, MASTER_ID, ASK_FOR_TASK, MPI_COMM_WORLD); // asking the master for a new task. blocking, since we don't have anything else to do until we get a new task
continue;
}
if(status1.MPI_TAG == KILL) { // free resources, send the master the optimal path and price, and die.
for (k=0; k < citiesNum; ++k) {
free(DistanceArray[k]);
}
free(factorials); free(decisionTree); free(options); free(recieveBuffer); free(edgeMinimum); free(DistanceArray); free(myCurrentPath);
sendBuffer[0] = myCurrentMinimum;
sendBuffer[1] = bestPathIndex;
MPI_Ssend(sendBuffer, 2, MPI_LONG_LONG_INT, MASTER_ID, SEND_PATH, MPI_COMM_WORLD); // synced & blocking, since we don't have anything else to do until the master gets the information
free(sendBuffer);
return;
}
} // while
}
// The static parallel algorithm main function. Runs the master and the workers.
int tsp_main(int citiesNum, int xCoord[], int yCoord[], int shortestPath[])
{
int rank, processesNum, result = 0;
MPI_Comm_size(MPI_COMM_WORLD, &processesNum);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
result = runMaster(citiesNum, xCoord, yCoord, shortestPath, processesNum);
} else {
runWorker(citiesNum, xCoord, yCoord, shortestPath, processesNum);
}
MPI_Barrier(MPI_COMM_WORLD);
return result;
}

I tried your code and managed to get rid of the error.
I received a signal: "floating point exception: integer divide by zero". I searched for where the exception occurred and found that it came from the first /fact. It was thrown from process 0, so I went to runMaster(). The offending call sits on the line right after the free(factorials); I swapped those two lines and the error disappeared.
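To illustrate why the order matters, here is a hypothetical minimal reproduction of the same kind of mistake (this is not code from the program above, just a sketch): reading a buffer after free() is undefined behaviour, and on my Linux run the freed memory happened to read back as 0, so the division index/fact became a divide by zero.
#include <stdio.h>
#include <stdlib.h>
int main(void)
{
    long long int *factorials = malloc(4 * sizeof(long long int));
    factorials[0] = 1; factorials[1] = 1; factorials[2] = 2; factorials[3] = 6;
    free(factorials);                     /* buffer released here...               */
    long long int fact = factorials[3];   /* ...but read afterwards: undefined     */
    printf("%lld\n", 100LL / fact);       /* if fact reads back as 0, this traps   */
    return 0;                             /* with "integer divide by zero"         */
}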
This order may be the right one:
convertIndexToPath(bestPathIndex, citiesNum, decisionTree, options, shortestPath, factorials);
free(factorials); free(decisionTree); free(options); free(recieveBuffer); free(sendBuffer);
However, when I tried the program with 2 or 3 processes, the outputs were different... I am surprised that it worked before!
Bye, Francis

Related

MPI Search In Array

I'm trying to find a specific value inside an array using parallel search with MPI. When my code finds the value, it shows an error.
ERROR
Assertion failed in file src/mpid/ch3/src/ch3u_buffer.c at line 77: FALSE
memcpy argument memory ranges overlap, dst_=0x7ffece7eb590 src_=0x7ffece7eb590 len_=4
PROGRAM
const char *FILENAME = "input.txt";
const size_t ARRAY_SIZE = 640;
int main(int argc, char **argv)
{
int *array = malloc(sizeof(int) * ARRAY_SIZE);
int rank,size;
MPI_Status status;
MPI_Request request;
int done,myfound,inrange,nvalues;
int i,j,dummy;
/* Let the system do what it needs to start up MPI */
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm_size(MPI_COMM_WORLD,&size);
myfound=0;
if (rank == 0)
{
createFile();
array = readFile(FILENAME);
}
MPI_Bcast(array, ARRAY_SIZE, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Irecv(&dummy, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &request);
MPI_Test(&request, &done, &status);
nvalues = ARRAY_SIZE / size; //EACH PROCESS RUNS THAT MUCH NUMBER IN ARRAY
i = rank * nvalues; //OFFSET FOR EACH PROCESS INSIDE THE ARRAY
inrange = (i <= ((rank + 1) * nvalues - 1) && i >= rank * nvalues); //LIMIT OF THE OFFSET
while (!done && inrange)
{
if (array[i] == 17)
{
dummy = 1;
for (j = 0; j < size; j++)
{
MPI_Send(&dummy, 1, MPI_INT, j, 1, MPI_COMM_WORLD);
}
printf("P:%d found it at global index %d\n", rank, i);
myfound = 1;
}
printf("P:%d - %d - %d\n", rank, i, array[i]);
MPI_Test(&request, &done, &status);
++i;
inrange = (i <= ((rank + 1) * nvalues - 1) && i >= rank * nvalues);
}
if (!myfound)
{
printf("P:%d stopped at global index %d\n", rank, i - 1);
}
MPI_Finalize();
}
The error is somewhere in here, because when I put a value that isn't in the array (for example -5) into the if condition, the program runs smoothly.
dummy = 1;
for (j = 0; j < size; j++)
{
MPI_Send(&dummy, 1, MPI_INT, j, 1, MPI_COMM_WORLD);
}
printf("P:%d found it at global index %d\n", rank, i);
myfound = 1;
Thanks
Your program is invalid with respect to the MPI standard because you use the same buffer (&dummy) for both MPI_Irecv() and MPI_Send().
You can either use two distinct buffers (e.g. dummy_send and dummy_recv), or, since you do not seem to care about the value of dummy, use NULL as the buffer and send/receive zero-size messages.
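A sketch of both options, reusing the variables from the program above (dummy_send and dummy_recv are illustrative names):
/* Option 1: distinct buffers for the outstanding receive and the notification sends. */
int dummy_send = 1, dummy_recv;
MPI_Irecv(&dummy_recv, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &request);
/* later, inside the search loop, when the value is found: */
for (j = 0; j < size; j++)
    MPI_Send(&dummy_send, 1, MPI_INT, j, 1, MPI_COMM_WORLD);

/* Option 2: zero-size messages; only the arrival of the message matters, not its payload. */
MPI_Irecv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &request);
for (j = 0; j < size; j++)
    MPI_Send(NULL, 0, MPI_INT, j, 1, MPI_COMM_WORLD);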

How to implement a MPI filter on C code?

I am trying to implement an MPI version of the filter code below, but I'm having difficulty doing it. How should it be done?
Filter code:
int A[100000][100000];
int B[100000][100000];
for (int i=1; i<(100000 - 1); i++)
for (int j=1; j<(100000 - 1); j++)
B[i][j] = A[i-1][j] + A[i+1][j] + A[i][j-1] + A[i][j+1] - 4*A[i][j];
This is what I have tried while following the six functions of MPI:
int myrank; /* Rank of process */
int numprocs; /* Number of processes */
int source; /* Rank of sender */
int dest; /* Rank of receiver */
char message[100]; /* Storage for the message */
MPI_Status status; /* Return status for receive */
MPI_Init( & argc, & argv);
MPI_Comm_size(MPI_COMM_WORLD, & numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, & myrank);
if (myrank != 0)
{
dest = 0;
MPI_Send(message, strlen(message) + 1,
MPI_CHAR, dest, 15, MPI_COMM_WORLD);
} else {
for (source = 1; source < numprocs; source++) {
MPI_Recv(message, 100, MPI_CHAR, source,
15, MPI_COMM_WORLD, & status);
}
}
MPI_Finalize();
I'd go like this. First of all, I'd have this code
int A[100000][100000];
int B[100000][100000];
replaced with dynamic allocations. You don't need all that memory for each and every process.
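For example (a sketch, assuming a 64-bit machine, since each 100000x100000 int matrix is roughly 40 GB): a pointer to an array of rows keeps the A[i][j] syntax and a contiguous layout, so the row-wise sends below still work.
int (*A)[100000] = NULL;
int (*B)[100000] = NULL;
if (myrank == 0) {
    /* only the master holds the full matrices; workers allocate just their slice */
    A = malloc(100000 * sizeof *A);
    B = malloc(100000 * sizeof *B);
}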
Then, I'd send array A to different processes. By rows.
What is the "height" of data frame (number of rows):
delta = (100000 - 2) / (numprocs-1); // we don't count first and last row
reminder = (100000 - 2) % (numprocs-1); // one of the processes might need to
// take a little bit more work
// we are starting from row with idx=1 (second row) and we want to finish when
// we hit last row
if(myrank == 0) {
for( int i=1; i < numprocs; i++ ) {
// + 200000 - we need two extra (halo) rows of 100000 ints each
int how_many_bytes = delta * 100000 + 200000; // really an element count, not bytes
if(reminder != 0 && i == (numprocs-1)) {
how_many_bytes += reminder * 100000;
}
MPI_Send(&(A[(i-1)*delta][0]), how_many_bytes, MPI_INT, i, 0,
MPI_COMM_WORLD);
}
} else {
// allocate memory for bytes
int *local_array = NULL;
int how_many_bytes = delta * 100000 + 200000;
if(reminder != 0 && myrank == (numprocs-1)) {
how_many_bytes += reminder * 100000;
}
local_array = malloc(how_many_bytes * sizeof(int));
MPI_Status status;
MPI_Recv(
local_array,
how_many_bytes,
MPI_INT,
0,
0,
MPI_COMM_WORLD,
&status);
}
// perform calculations for each and every slice
// remembering that we always have one extra row on
// top and one at the bottom
// send data back to master (as above, but vice versa).
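A sketch of what that per-slice step could look like (rows_here and local_B are names I'm introducing here; local_array is assumed to hold the worker's own rows plus one halo row above and one below, as received above):
int rows_here = delta;
if (reminder != 0 && myrank == numprocs - 1)
    rows_here += reminder;

int *local_B = malloc(rows_here * 100000 * sizeof(int));

for (int r = 1; r <= rows_here; r++) {        /* local rows 0 and rows_here+1 are halos */
    for (int c = 1; c < 100000 - 1; c++) {    /* first and last columns stay untouched  */
        local_B[(r - 1) * 100000 + c] =
              local_array[(r - 1) * 100000 + c]    /* A[i-1][j]   */
            + local_array[(r + 1) * 100000 + c]    /* A[i+1][j]   */
            + local_array[r * 100000 + c - 1]      /* A[i][j-1]   */
            + local_array[r * 100000 + c + 1]      /* A[i][j+1]   */
            - 4 * local_array[r * 100000 + c];     /* - 4*A[i][j] */
    }
}

/* mirror of the scatter above: send this worker's slice of B back to rank 0,
   which copies it into B starting at global row (myrank - 1) * delta + 1 */
MPI_Send(local_B, rows_here * 100000, MPI_INT, 0, 0, MPI_COMM_WORLD);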

How to handle MPI sendcount of zero

What is the correct way to handle a sendcount = 0 when using MPI_Gatherv (or any other function that requires a sendcount) when setting up the displs argument?
I have data that needs to be received by all processors, but not every processor has data of its own to send. As an MWE, I tried (on just two processors):
#include <stdlib.h>
#include <stdio.h>
#include <mpi.h>
int main(void)
{
int ntasks;
int thistask;
int n = 0;
int i;
int totcounts = 0;
int *data;
int *rbuf;
int *rcnts;
int *displs;
int *master_data;
int *master_displs;
// Set up mpi
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &thistask);
// Allocate memory for arrays needed by allgatherv
rbuf = calloc(ntasks, sizeof(int));
rcnts = calloc(ntasks, sizeof(int));
displs = calloc(ntasks, sizeof(int));
master_displs = calloc(ntasks, sizeof(int));
// Initialize the counts and displacement arrays
for(i = 0; i < ntasks; i++)
{
rcnts[i] = 1;
displs[i] = i;
}
// Allocate data on just one task, but not others
if(thistask == 1)
{
n = 3;
data = calloc(n, sizeof(int));
for(i = 0; i < n; i++)
{
data[i] = i;
}
}
// Get n so each other processor knows about what others are sending
MPI_Allgatherv(&n, 1, MPI_INT, rbuf, rcnts, displs, MPI_INT, MPI_COMM_WORLD);
// Now that we know how much data each processor is sending, we allocate the array
// to hold it all
for(i = 0; i < ntasks; i++)
{
totcounts += rbuf[i];
}
master_data = calloc(totcounts, sizeof(int));
// Get displs for master data
master_displs[0] = 0;
for(i = 1; i < ntasks; i++)
{
master_displs[i] = master_displs[i - 1] + rbuf[i - 1];
}
// Send each processor's data to all others
MPI_Allgatherv(&data, n, MPI_INT, master_data, rbuf, master_displs, MPI_INT, MPI_COMM_WORLD);
// Print it out to see if it worked
if(thistask == 0)
{
for(i = 0; i < totcounts; i++)
{
printf("master_data[%d] = %d\n", i, master_data[i]);
}
}
// Free
if(thistask == 1)
{
free(data);
}
free(rbuf);
free(rcnts);
free(displs);
free(master_displs);
free(master_data);
MPI_Finalize();
return 0;
}
The way that I've set up master_displs works when every processor has a non-zero n (that is, when every processor has data to send). In the case above, where task 0 sends nothing, both entries are zero, and the results of this program are garbage. How should I set up the master_displs array to ensure that master_data holds the correct information (here, just master_data[i] = i, as received from task 1)?

MPI Reduce and Broadcast work, but cause a future return value to fail

I am using MPI to implement Dijkstra's algorithm for a class. My teacher also has no idea why this is broken and has given me permission to post here.
My problem is happening in the chooseVertex function. The program works fine with 1 processor, but when I run it with 2 processors, processor 0 fails to return leastPosition, even though I am able to print the contents of leastPosition on the line before the return.
My code:
#include "mpi.h"
#include <stdlib.h>
#include <stdio.h>
#define min(x,y) ((x) > (y) ? (y) : (x))
#define MASTER 0
#define INFINTY 100000
void dijkstra(int, int, int **, int *, int, int);
int chooseVertex(int *, int, int *, int, int);
int main(int argc, char* argv[])
{
int rank, size, i, j;
//Initialize MPI
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
//Initialize graph
int src = 0;
int n = 12;
int **edge = (int**) malloc(n * sizeof(int *));
for (i = 0; i < n; i++)
edge[i] = (int *)malloc(n * sizeof(int));
int dist[12];
//Set all graph lengths to infinity
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
if (i == j) { edge[i][j] = 0; }
else { edge[i][j] = INFINTY; }
}
}
//set graph edge lengths
edge[0][3] = 5;
edge[0][6] = 13;
edge[1][5] = 12;
edge[2][1] = 7;
edge[3][2] = 9;
edge[3][4] = 2;
edge[4][7] = 3;
edge[5][10] = 1;
edge[5][11] = 4;
edge[6][9] = 9;
edge[7][8] = 4;
edge[8][9] = 10;
edge[8][10] = 7;
edge[9][10] = 6;
edge[10][11] = 1;
dijkstra(src, n, edge, dist, rank, size);
if(rank == MASTER){ printf("The distance is %d", dist[n - 1]); }
MPI_Finalize();
return 0;
}
//called by dijkstras function below
int chooseVertex(int *dist, int n, int *found, int rank, int size) {
int i, tmp, partition, lower, upper, leastPosition;
int least = INFINTY;
//set the number of nodes each processor will work with
partition = n / size;
lower = rank * partition;
upper = lower + partition;
//used for MPI_Reduce
struct {
int pos;
int val;
} sendBuffr, recvBuffr;
//calculate least position
for (i = lower; i < upper; i++) {
tmp = dist[i];
if ((!found[i]) && (tmp < least)) {
least = tmp;
leastPosition = i;
}
}
//if all nodes checked are INFINITY, go with last node checked
if (least == INFINTY) leastPosition = i;
//set the send buffer for MPI_Reduce
sendBuffr.val = least;
sendBuffr.pos = leastPosition;
//Rank 0 processor has correct least position and value
MPI_Reduce(&sendBuffr, &recvBuffr, 1, MPI_DOUBLE_INT, MPI_MINLOC, MASTER, MPI_COMM_WORLD);
if (rank == MASTER) leastPosition = recvBuffr.pos;
//Update all processors to have correct position
MPI_Bcast(&leastPosition, 1, MPI_INT, MASTER, MPI_COMM_WORLD);
//Print the contents of leastPosition on rank 0 for debugging
if(rank == MASTER) printf("LeastPosition for rank %d is: %d\n", rank, leastPosition);
fflush(stdout);
return leastPosition;
}
void dijkstra(int SOURCE, int n, int **edge, int *dist, int rank, int size)
{
int i, j, count, partition, lower, upper, *found, *sendBuffer;
j = INFINTY;
sendBuffer = (int *)malloc(n * sizeof(int));
found = (int *)calloc(n, sizeof(int));
partition = n / size;
lower = rank * partition;
upper = lower + partition;
//set the distance array
for (i = 0; i < n; i++) {
found[i] = 0;
dist[i] = edge[SOURCE][i];
sendBuffer[i] = dist[i];
}
found[SOURCE] = 1;
count = 1;
//Dijkstra loop
while (count < n) {
printf("before ChooseVertex: rank %d reporting\n", rank);
fflush(stdout);
j = chooseVertex(dist, n, found, rank, size);
printf("after ChooseVertex: rank %d reporting\n", rank);
fflush(stdout);
count++;
found[j] = 1;
for (i = lower; i < upper; i++) {
if (!found[i])
{
dist[i] = min(dist[i], dist[j] + edge[j][i]);
sendBuffer[i] = dist[i];
}
}
MPI_Reduce(sendBuffer, dist, n, MPI_INT, MPI_MIN, MASTER, MPI_COMM_WORLD);
MPI_Bcast(dist, n, MPI_INT, MASTER, MPI_COMM_WORLD);
}
}
Sample error messages:
before ChooseVertex: rank 1 reporting
before ChooseVertex: rank 0 reporting
LeastPosition for rank 0 is: 3
after ChooseVertex: rank 1 reporting
after ChooseVertex: rank 0 reporting
before ChooseVertex: rank 1 reporting
before ChooseVertex: rank 0 reporting
after ChooseVertex: rank 1 reporting
LeastPosition for rank 0 is: 4
after ChooseVertex: rank 0 reporting
before ChooseVertex: rank 0 reporting
before ChooseVertex: rank 1 reporting
LeastPosition for rank 0 is: 7
after ChooseVertex: rank 1 reporting
job aborted:
[ranks] message
[0] process exited without calling finalize
[1] terminated
---- error analysis -----
[0] on My-ComputerName
Assignmet3PP ended prematurely and may have crashed. exit code 3
---- error analysis -----
Your reduce command is:
MPI_Reduce(&sendBuffr, &recvBuffr, 1, MPI_DOUBLE_INT, MPI_MINLOC, MASTER, MPI_COMM_WORLD);
By using MPI_DOUBLE_INT, you are saying that you are sending a struct with two variables: a double followed by an int. That is not your struct, however: you have two ints. Therefore you should use MPI_2INT, one of the pair types the MPI standard predefines for MPI_MINLOC/MPI_MAXLOC reductions. Alternatively, you could create your own derived datatype for the struct.
An example fix is:
MPI_Reduce(&sendBuffr, &recvBuffr, 1, MPI_2INT, MPI_MINLOC, MASTER, MPI_COMM_WORLD);
Also, a reduction, followed by a broadcast can be easily combined into one step with MPI_Allreduce().
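For example (a sketch; note that with MPI_MINLOC the first int of the pair is treated as the value being minimized and the second as its index, so the struct members should be laid out value first, position second):
struct {
    int val;   /* value to minimize */
    int pos;   /* its index         */
} sendBuffr, recvBuffr;

sendBuffr.val = least;
sendBuffr.pos = leastPosition;

/* reduce and broadcast in one call: every rank receives the winning pair */
MPI_Allreduce(&sendBuffr, &recvBuffr, 1, MPI_2INT, MPI_MINLOC, MPI_COMM_WORLD);
leastPosition = recvBuffr.pos;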

Problem with MPI matrix-matrix multiply: Cluster slower than single computer

I wrote a small program using MPI to parallelize matrix-matrix multiplication. The problem is that when I run the program on my computer, it takes about 10 seconds to complete, but about 75 seconds on a cluster. I think I have a synchronization problem, but I cannot figure it out (yet).
Here's my source code:
/*matrix.c
mpicc -o out matrix.c
mpirun -np 11 out
*/
#include <mpi.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define N 1000
#define DATA_TAG 10
#define B_SENT_TAG 20
#define FINISH_TAG 30
int master(int);
int worker(int, int);
int main(int argc, char **argv) {
int myrank, p;
double s_time, f_time;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
if (myrank == 0) {
s_time = MPI_Wtime();
master(p);
f_time = MPI_Wtime();
printf("Complete in %1.2f seconds\n", f_time - s_time);
fflush(stdout);
}
else {
worker(myrank, p);
}
MPI_Finalize();
return 0;
}
int *read_matrix_row();
int *read_matrix_col();
int send_row(int *, int);
int recv_row(int *, int, MPI_Status *);
int send_tag(int, int);
int write_matrix(int *);
int master(int p) {
MPI_Status status;
int *a; int *b;
int *c = (int *)malloc(N * sizeof(int));
int i, j; int num_of_finish_row = 0;
while (1) {
for (i = 1; i < p; i++) {
a = read_matrix_row();
b = read_matrix_col();
send_row(a, i);
send_row(b, i);
//printf("Master - Send data to worker %d\n", i);fflush(stdout);
}
wait();
for (i = 1; i < N / (p - 1); i++) {
for (j = 1; j < p; j++) {
//printf("Master - Send next row to worker[%d]\n", j);fflush(stdout);
b = read_matrix_col();
send_row(b, j);
}
}
for (i = 1; i < p; i++) {
//printf("Master - Announce all row of B sent to worker[%d]\n", i);fflush(stdout);
send_tag(i, B_SENT_TAG);
}
//MPI_Barrier(MPI_COMM_WORLD);
for (i = 1; i < p; i++) {
recv_row(c, MPI_ANY_SOURCE, &status);
//printf("Master - Receive result\n");fflush(stdout);
num_of_finish_row++;
}
//printf("Master - Finish %d rows\n", num_of_finish_row);fflush(stdout);
if (num_of_finish_row >= N)
break;
}
//printf("Master - Finish multiply two matrix\n");fflush(stdout);
for (i = 1; i < p; i++) {
send_tag(i, FINISH_TAG);
}
//write_matrix(c);
return 0;
}
int worker(int myrank, int p) {
int *a = (int *)malloc(N * sizeof(int));
int *b = (int *)malloc(N * sizeof(int));
int *c = (int *)malloc(N * sizeof(int));
int i;
for (i = 0; i < N; i++) {
c[i] = 0;
}
MPI_Status status;
int next = (myrank == (p - 1)) ? 1 : myrank + 1;
int prev = (myrank == 1) ? p - 1 : myrank - 1;
while (1) {
recv_row(a, 0, &status);
if (status.MPI_TAG == FINISH_TAG)
break;
recv_row(b, 0, &status);
wait();
//printf("Worker[%d] - Receive data from master\n", myrank);fflush(stdout);
while (1) {
for (i = 1; i < p; i++) {
//printf("Worker[%d] - Start calculation\n", myrank);fflush(stdout);
calc(c, a, b);
//printf("Worker[%d] - Exchange data with %d, %d\n", myrank, next, prev);fflush(stdout);
exchange(b, next, prev);
}
//printf("Worker %d- Request for more B's row\n", myrank);fflush(stdout);
recv_row(b, 0, &status);
//printf("Worker %d - Receive tag %d\n", myrank, status.MPI_TAG);fflush(stdout);
if (status.MPI_TAG == B_SENT_TAG) {
break;
//printf("Worker[%d] - Finish calc one row\n", myrank);fflush(stdout);
}
}
//wait();
//printf("Worker %d - Send result\n", myrank);fflush(stdout);
send_row(c, 0);
for (i = 0; i < N; i++) {
c[i] = 0;
}
}
return 0;
}
int *read_matrix_row() {
int *row = (int *)malloc(N * sizeof(int));
int i;
for (i = 0; i < N; i++) {
row[i] = 1;
}
return row;
}
int *read_matrix_col() {
int *col = (int *)malloc(N * sizeof(int));
int i;
for (i = 0; i < N; i++) {
col[i] = 1;
}
return col;
}
int send_row(int *row, int dest) {
MPI_Send(row, N, MPI_INT, dest, DATA_TAG, MPI_COMM_WORLD);
return 0;
}
int recv_row(int *row, int src, MPI_Status *status) {
MPI_Recv(row, N, MPI_INT, src, MPI_ANY_TAG, MPI_COMM_WORLD, status);
return 0;
}
int wait() {
MPI_Barrier(MPI_COMM_WORLD);
return 0;
}
int calc(int *c_row, int *a_row, int *b_row) {
int i;
for (i = 0; i < N; i++) {
c_row[i] = c_row[i] + a_row[i] * b_row[i];
//printf("%d ", c_row[i]);
}
//printf("\n");fflush(stdout);
return 0;
}
int exchange(int *row, int next, int prev) {
MPI_Request request; MPI_Status status;
MPI_Isend(row, N, MPI_INT, next, DATA_TAG, MPI_COMM_WORLD, &request);
MPI_Irecv(row, N, MPI_INT, prev, MPI_ANY_TAG, MPI_COMM_WORLD, &request);
MPI_Wait(&request, &status);
return 0;
}
int send_tag(int worker, int tag) {
MPI_Send(0, 0, MPI_INT, worker, tag, MPI_COMM_WORLD);
return 0;
}
int write_matrix(int *matrix) {
int i;
for (i = 0; i < N; i++) {
printf("%d ", matrix[i]);
}
printf("\n");
fflush(stdout);
return 0;
}
Well, first, you have a fairly small matrix (N=1000), and second, you distribute your algorithm on a row/column basis rather than in blocks.
For a more realistic version using better algorithms, you might want to acquire an optimized BLAS library (e.g. GOTO is free), test single-thread performance with that one, then get PBLAS and link it against your optimized BLAS, and compare MPI parallel performance using the PBLAS version.
I see some errors in your program:
First, why are you calling the wait function at all, since its implementation simply calls MPI_Barrier? MPI_Barrier is a synchronization primitive that blocks every process until all of them have reached the barrier. My question is: do you really want the master to be synchronized with the workers? Here that is not optimal, because a worker does not need to wait for the master before beginning its calculation.
Second, there are 2 unnecessary for loops.
for (i = 1; i < N / (p - 1); i++) {
for (j = 1; j < p; j++) {
b = read_matrix_col();
send_row(b, j);
}
}
for (i = 1; i < p; i++) {
send_tag(i, B_SENT_TAG);
}
In the first i-loop, you don't use the variable in your statement. Since the j-loop and the second i-loop are the same, you could do:
for (i = 1; i < p; i++) {
b = read_matrix_col();
send_row(b, i);
send_tag(i, B_SENT_TAG);
}
In terms of data transfer, your program is not optimized, because you are sending an array of 1000 integers for each transfer. There should be a better way to optimize the data transfer, but I will let you look into that. So make the corrections I described and tell us what your new performance is.
And as #janneb said, you can use the BLAS library for better performance for matrix multiplication. Good luck!
I did not look over your code, but I can provide some hints about why your result may not be unexpected:
As already mentioned, N=1000 may be too small. You should make more tests to see the scalability of your program (try setting N=100, 500, 1000, 5000, 10000, etc.) and compare results on both your system and the cluster.
Compare results between your system (one processor I presume) and a single processor on the cluster. Usually in production environments like servers or clusters a single processor is less powerful than the best processors designed for desktop use, but they provide stability, reliability and other features useful for environments which run 24h/day at full capacity.
If your processor has multiple cores, more than one MPI processes may run at the same time and synchronization between them is negligible compared to the synchronization between nodes in a cluster.
Are the nodes of the cluster statically assigned to you? Otherwise, other users' programs may be scheduled on the same nodes at the same time as yours.
Read documentation about the cluster's architecture. Some architectures may be more suitable for particular classes of problems.
Assess the latency of the cluster's network. Pinging from each node to another many times and computing the mean value may give a rough estimate; an MPI-level round trip gives an even more relevant number (see the sketch after this list).
Last but perhaps the most important, your algorithm may not be optimal. Read a/some books on matrix multiplication (I can recommend "Matrix Computations", Golub and Van Loan).
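Regarding the latency point above, here is a minimal MPI round-trip timing sketch (it assumes at least two ranks; launch them on two different nodes so you measure the network rather than shared memory):
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, payload = 0, iters = 1000;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double t0 = MPI_Wtime();
    for (int it = 0; it < iters; it++) {
        if (rank == 0) {
            MPI_Send(&payload, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
            MPI_Recv(&payload, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        } else if (rank == 1) {
            MPI_Recv(&payload, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Send(&payload, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        }
    }
    if (rank == 0)
        printf("mean round-trip time: %g seconds\n", (MPI_Wtime() - t0) / iters);

    MPI_Finalize();
    return 0;
}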
