I want to send a struct that contains some variables and a dynamically allocated array.
I read that it's not possible to send everything at once because of the dynamic array: I should first send a message with the other variables and then a second message with the dynamic array.
Because of that, I thought I could instead send a copy of the struct's contents packed into a dynamic array of BYTE (see the code for details).
I wrote the following code.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
// I read MPI_BYTE is an unsigned char
typedef unsigned char BYTE;
typedef struct Message
{
int id;
int detectNr;
char *detection;
} Msg;
int main() {
int size, rank;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
printf("Invalid number of processes.\n");
return -1;
}
// initialize an empty message
Msg msg = {.id = 1, .detectNr = 0, .detection = NULL};
// here we should take the measurements
// for now suppose there are 10
msg.detectNr = 10;
msg.detection = malloc(sizeof(char) * msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
if(rank != 0) {
// put the data inside a buffer of BYTE
int bufferSize = sizeof(int) + sizeof(int) + sizeof(char) * msg.detectNr;
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg.id, sizeof(int));
memcpy(buffer + sizeof(int), &msg.detectNr, sizeof(int));
memcpy(buffer + (2 * sizeof(int)), &msg.detection, sizeof(char) * msg.detectNr);
// send buffer to process 0
MPI_Send(buffer, bufferSize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
free(buffer);
free(msg.detection);
} else {
for (int i = 1; i < size; i++) {
int bufferSize;
BYTE *buffer;
MPI_Status status;
// initialize an empty message
Msg rcv= {.id = 0, .detectNr = 0, .detection = NULL};
// probe for an incoming message from process zero
MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
// when probe returns, the status object has the size and other
// attributes of the incoming message
// get the message size
MPI_Get_count(&status, MPI_BYTE, &bufferSize);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// allocate a buffer to hold the incoming data
buffer = malloc(sizeof(BYTE) * bufferSize);
// now receive the message with the allocated buffer
MPI_Recv(buffer, bufferSize, MPI_BYTE, i, 0, MPI_COMM_WORLD, &status);
// copy the data from the buffer to the message
memcpy(&rcv.id, buffer, sizeof(int));
memcpy(&rcv.detectNr, buffer + sizeof(int), sizeof(int));
memcpy(&rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);
printf("Process %d: id: %d\n", rank, rcv.id);
printf("Process %d: detectNr: %d\n", rank, rcv.detectNr);
printf("Process %d: detection: %s\n", rank, rcv.detection);
free(rcv.detection);
free(buffer);
}
}
MPI_Finalize();
return 0;
}
Unfortunately it doesn't work; the result is below.
Process 0: buffer size: 18
Process 1: id: 1
Process 1: detectNr: 10
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
It looks like (and probably is the case) that the last part of the receive buffer is empty.
I don't understand why this happens: I'm not trying to send the original detection array; instead I'm allocating an entirely new dynamic array and copying all the values into it.
Can you help me solve the problem, or explain why it doesn't work?
I don't know if this helps: I'm developing in C, under Ubuntu, with VSCode as the editor and gcc as the compiler.
OK, basically the problem wasn't MPI but my memory handling, so my fault: on the sending side I copied the pointer msg.detection itself into the buffer (&msg.detection) instead of the bytes it points to, and on the receiving side I copied the incoming bytes over the rcv.detection pointer instead of first allocating memory for it.
Here is the corrected code:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
// I read MPI_BYTE is an unsigned char
typedef unsigned char BYTE;
typedef struct Message
{
int id;
int detectNr;
char *detection;
} Msg;
int main() {
int size, rank;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
printf("Invalid number of processes.\n");
return -1;
}
if(rank != 0) {
// initialize an empty message
Msg msg = {.id = 1, .detectNr = 0, .detection = NULL};
// here we should take the measurements
// for now suppose there are 10
msg.detectNr = 10;
msg.detection = malloc(sizeof(char) * msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
printf("Process %d: id: %d\n", rank, msg.id);
printf("Process %d: detectNr: %d\n", rank, msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
printf("Process %d: msg.detection[%d]: %c\n", rank, i, msg.detection[i]);
// put the data inside a buffer of BYTE
int bufferSize = sizeof(int) + sizeof(int) + sizeof(char) * msg.detectNr;
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg.id, sizeof(int));
memcpy(buffer + sizeof(int), &msg.detectNr, sizeof(int));
memcpy(buffer + (2 * sizeof(int)), msg.detection, sizeof(char) * msg.detectNr);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// send buffer to process 0
MPI_Send(buffer, bufferSize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
free(buffer);
free(msg.detection);
} else {
for (int i = 1; i < size; i++) {
int bufferSize;
BYTE *buffer;
MPI_Status status;
// initialize an empty message
Msg rcv = {.id = 0, .detectNr = 0, .detection = NULL};
// probe for an incoming message from process zero
MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
// when probe returns, the status object has the size and other
// attributes of the incoming message
// get the message size
MPI_Get_count(&status, MPI_BYTE, &bufferSize);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// allocate a buffer to hold the incoming data
buffer = malloc(sizeof(BYTE) * bufferSize);
// now receive the message with the allocated buffer
MPI_Recv(buffer, bufferSize, MPI_BYTE, i, 0, MPI_COMM_WORLD, &status);
// copy the data from the buffer to the message
memcpy(&rcv.id, buffer, sizeof(int));
memcpy(&rcv.detectNr, buffer + sizeof(int), sizeof(int));
rcv.detection = malloc(sizeof(char) * rcv.detectNr);
memcpy(rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);
printf("Process %d: id: %d\n", rank, rcv.id);
printf("Process %d: detectNr: %d\n", rank, rcv.detectNr);
for (int j = 0; j < rcv.detectNr; j++)
printf("Process %d: rcv.detection[%d]: %c\n", rank, j, rcv.detection[j]);
free(rcv.detection);
free(buffer);
}
}
MPI_Finalize();
return 0;
}
In my C program I have a structure like the one below
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
typedef struct Message
{
int elemNr;
char *elem;
} Msg;
I think I followed all the steps needed to create a custom data type in MPI:
int main(int argc, char **argv) {
int size, rank;
int i;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
fprintf(stderr,"Requires at least two processes.\n");
exit(-1);
}
// just for simplicity
const int n = 5;
// create a new type for struct message
MPI_Datatype myType;
Msg msgSnd;
int block_length[2] = {1, n};
MPI_Aint elemNrAddr, elemAddr;
MPI_Aint displacement[2] = {0, 0};
MPI_Get_address(&msgSnd.elemNr, &elemNrAddr);
MPI_Get_address(&msgSnd.elem, &elemAddr);
// just displacement[1] because displacement[0] starts from 0
displacement[1] = elemAddr - elemNrAddr;
MPI_Datatype types[2] = {MPI_INT, MPI_CHAR};
MPI_Type_create_struct(2, block_length, displacement, types, &myType);
MPI_Type_commit(&myType);
// populate the message
msgSnd.elemNr = n;
msgSnd.elem = malloc(sizeof(char) * msgSnd.elemNr);
srand(time(NULL));
for (i = 0; i < msgSnd.elemNr; i++)
msgSnd.elem[i] = rand() % 26 + 'a';
if (rank != 0) {
printf("I'm sending\n");
MPI_Send(&msgSnd, 1, myType, 0, 0, MPI_COMM_WORLD);
printf("I sent\n");
} else {
MPI_Status status;
Msg msgRcv;
printf("I'm receiving\n");
MPI_Recv(&msgRcv, 1, myType, 1, 0, MPI_COMM_WORLD, &status);
printf("I received\n");
for (i = 0; i < msgRcv.elemNr; i++)
printf("element %d: %c\n", i, msgRcv.elem[i]);
if (msgRcv.elem != NULL)
free(msgRcv.elem);
}
if (msgSnd.elem != NULL)
free(msgSnd.elem);
MPI_Type_free(&myType);
MPI_Finalize();
return 0;
}
I ran the above code, but unfortunately when process 0 receives the message, the elem pointer is null and the program ends with a segmentation fault.
Can you help me find the problem? Or how can I send a dynamic array inside a struct with MPI?
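One workaround (a sketch only, not taken from this post, reusing the Msg type and the two ranks above) is to drop the derived datatype and send the fixed-size fields and the dynamic array as two separate messages; the datatype built above only describes the location of the elem pointer inside the struct, not the memory it points to:
if (rank != 0) {
    /* first the scalar field, then the dynamically allocated data */
    MPI_Send(&msgSnd.elemNr, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    MPI_Send(msgSnd.elem, msgSnd.elemNr, MPI_CHAR, 0, 1, MPI_COMM_WORLD);
} else {
    Msg msgRcv;
    MPI_Recv(&msgRcv.elemNr, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    /* allocate before receiving the array data */
    msgRcv.elem = malloc(sizeof(char) * msgRcv.elemNr);
    MPI_Recv(msgRcv.elem, msgRcv.elemNr, MPI_CHAR, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    for (i = 0; i < msgRcv.elemNr; i++)
        printf("element %d: %c\n", i, msgRcv.elem[i]);
    free(msgRcv.elem);
}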
First, let me say that I am French and my English is not very good.
I am working on an MPI application, I have some problems, and I hope somebody can help me.
As the title of my post says, I am trying to use a thread that listens for when I have to kill my application and then calls the MPI_Finalize function.
However, my application does not finish correctly.
More precisely, I obtain the following message:
[XPS-2720:27441] *** Process received signal ***
[XPS-2720:27441] Signal: Segmentation fault (11)
[XPS-2720:27441] Signal code: Address not mapped (1)
[XPS-2720:27441] Failing at address: 0x7f14077a3b6d
[XPS-2720:27440] *** Process received signal ***
[XPS-2720:27440] Signal: Segmentation fault (11)
[XPS-2720:27440] Signal code: Address not mapped (1)
[XPS-2720:27440] Failing at address: 0x7fb11d07bb6d
mpirun noticed that process rank 1 with PID 27440 on node lagniez-XPS-2720 exited on signal 11 (Segmentation fault).
My slave code is:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <pthread.h>
#include <cassert>
#define send_data_tag 1664
#define send_kill_tag 666
void *finilizeMPICom(void *intercomm)
{
printf("the finilizeMPICom was called\n");
MPI_Comm parentcomm = * ((MPI_Comm *) intercomm);
MPI_Status status;
int res;
// sleep(10);
MPI_Recv(&res, 1, MPI_INT, 0, send_kill_tag, parentcomm, &status);
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("we receive something %d -- %d\n", rank, res);
MPI_Finalize();
exit(0);
}// finilizeMPICom
int main( int argc, char *argv[])
{
int numtasks, rank, len, rc;
char hostname[MPI_MAX_PROCESSOR_NAME];
int provided, claimed;
rc = MPI_Init_thread(0, 0, MPI_THREAD_MULTIPLE, &provided);
MPI_Query_thread( &claimed );
if (rc != MPI_SUCCESS || provided != 3)
{
printf ("Error starting MPI program. Terminating.\n");
MPI_Abort(MPI_COMM_WORLD, rc);
}
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm parentcomm;
MPI_Comm_get_parent(&parentcomm);
/* create a second thread to listen when we have to kill the program */
pthread_t properlyKill;
if(pthread_create(&properlyKill, NULL, finilizeMPICom, (void *) &parentcomm))
{
fprintf(stderr, "Error creating thread\n");
return 0;
}
assert(parentcomm != MPI_COMM_NULL);
MPI_Status status;
int root_process, ierr, num_rows_to_receive;
int mode;
MPI_Recv( &mode, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
printf("c The solver works in the mode %d\n", mode);
printf("I sent a message %d\n", rank);
// if(rank != 1) sleep(100);
int res = 1;
MPI_Send(&res, 1, MPI_INT, 0, send_data_tag, parentcomm);
printf("we want to listen for somethiing %d\n", rank);
int rescc = 1;
MPI_Recv(&rescc, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
printf("I received the message %d %d\n", rescc, rank);
if(rescc == 1000)
{
printf("~~~~~~~~>>> I print the solution %d\n", rank);
int res3 = 1001;
MPI_Send(&res3, 1, MPI_INT, 0, send_data_tag, parentcomm);
}
else printf("I do not understand %d\n", rank);
printf("I wait the thread to kill the programm %d\n", rank);
pthread_join(properlyKill, (void**)&(res));
return 0;
}
For the master I have:
int main(int argc, char **argv)
{
Parser *p = new Parser("slave.xml");
MPI_Init(&argc, &argv);
if(p->method == "concurrent")
{
ConcurrentManager cc(p->instance, p->solvers);
cc.run();
}
else
{
cerr << "c The only available methods are: concurrent, eps (Embarrassingly Parallel Search) or tree" << endl;
exit(1);
}
delete(p);
MPI_Finalize();
exit(0);
}// main
/**
Create a concurrent manager (means init the data structures to run
the solvers).
#param[in] _instance, the benchmark path
#param[in] _solvers, the set of solvers that will be ran
*/
ConcurrentManager::ConcurrentManager(string _instance, vector<Solver> &_solvers) :
instance(_instance), solvers(_solvers)
{
cout << "c\nc Concurrent manager called" << endl;
nbSolvers = _solvers.size();
np = new int[nbSolvers];
cmds = new char*[nbSolvers];
arrayOfArgs = new char **[nbSolvers];
infos = new MPI_Info[nbSolvers];
for(int i = 0 ; i<nbSolvers ; i++)
{
np[i] = solvers[i].npernode;
cmds[i] = new char[(solvers[i].executablePath).size() + 1];
strcpy(cmds[i], (solvers[i].executablePath).c_str());
arrayOfArgs[i] = new char *[(solvers[i].options).size() + 1];
for(unsigned int j = 0 ; j<(solvers[i].options).size() ; j++)
{
arrayOfArgs[i][j] = new char[(solvers[i].options[j]).size() + 1];
strcpy(arrayOfArgs[i][j], (solvers[i].options[j]).c_str());
}
arrayOfArgs[i][(solvers[i].options).size()] = NULL;
MPI_Info_create(&infos[i]);
char hostname[solvers[i].hostname.size()];
strcpy(hostname, solvers[i].hostname.c_str());
MPI_Info_set(infos[i], "host", hostname);
}
sizeComm = 0;
}// constructor
/**
Wait that at least one process finish and return the code
SOLUTION_FOUND.
#param[in] intercomm, the communicator
*/
void ConcurrentManager::waitForSolution(MPI_Comm &intercomm)
{
MPI_Status arrayStatus[sizeComm], status;
MPI_Request request[sizeComm];
int val[sizeComm], flag;
for(int i = 0 ; i<sizeComm ; i++) MPI_Irecv(&val[i], 1, MPI_INT, i, TAG_MSG, intercomm, &request[i]);
bool solutionFound = false;
while(!solutionFound)
{
for(int i = 0 ; i<sizeComm ; i++)
{
MPI_Test(&request[i], &flag, &arrayStatus[i]);
if(flag)
{
printf("---------------------> %d reveived %d\n", i , val[i]);
if(val[i] == SOLUTION_FOUND)
{
int msg = PRINT_SOLUTION;
MPI_Send(&msg, 1, MPI_INT, i, TAG_MSG, intercomm); // ask to print the solution
int msgJobFinished;
MPI_Recv(&msgJobFinished, 1, MPI_INT, i, TAG_MSG, intercomm, &status); // wait the answer
assert(msgJobFinished == JOB_FINISHED);
cout << "I am going to kill everybody" << endl;
int msgKill[sizeComm];
for(int j = 0 ; j<sizeComm ; j++)
{
msgKill[i] = STOP_AT_ONCE;
MPI_Send(&msgKill[i], 1, MPI_INT, j, TAG_KILL, intercomm);
}
solutionFound = true;
break;
} else
{
printf("restart the communication for %d\n", i);
MPI_Irecv(&val[i], 1, MPI_INT, i, TAG_MSG, intercomm, &request[i]);
}
}
}
}
}// waitForSolution
/**
Run the solver.
*/
void ConcurrentManager::run()
{
MPI_Comm intercomm;
int errcodes[solvers.size()];
MPI_Comm_spawn_multiple(nbSolvers, cmds, arrayOfArgs, np, infos, 0, MPI_COMM_WORLD, &intercomm, errcodes);
MPI_Comm_remote_size(intercomm, &sizeComm);
cout << "c Solvers are now running: " << sizeComm << endl;
int msg = CONCU_MODE;
for(int i = 0 ; i<sizeComm ; i++) MPI_Send(&msg, 1, MPI_INT, i, TAG_MSG, intercomm); // init the working mode
waitForSolution(intercomm);
}// run
I know that I put a lot of code :(
But I do not know where the problem is.
Please, help me :)
Best regards.
The MPI documentation for how MPI interacts with threads demands that the call to MPI_Finalize() be performed by the main thread -- that is, the same one that initialized MPI. In your case, that also happens to be your process's initial thread.
In order to satisfy MPI's requirements, you could reorganize your application so that the initial thread is the one that waits for a kill signal and then shuts down MPI. The other work it currently does would then need to be moved to a different thread.
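A minimal sketch of that reorganization (workerLoop is a made-up name here; the slave's existing send/receive logic from the question would move into it, and send_kill_tag is the tag from the question):
#include <mpi.h>
#include <pthread.h>
#include <stdio.h>

#define send_kill_tag 666

static void *workerLoop(void *arg)
{
    MPI_Comm parentcomm = *((MPI_Comm *) arg);
    /* ... the MPI_Recv/MPI_Send exchange currently done in the slave's main() ... */
    (void) parentcomm;
    return NULL;
}

int main(int argc, char *argv[])
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

    MPI_Comm parentcomm;
    MPI_Comm_get_parent(&parentcomm);

    pthread_t worker;
    pthread_create(&worker, NULL, workerLoop, (void *) &parentcomm);

    /* the initial thread blocks here until the master sends the kill message */
    int res;
    MPI_Recv(&res, 1, MPI_INT, 0, send_kill_tag, parentcomm, MPI_STATUS_IGNORE);

    /* assumes the worker returns once its own exchange has finished */
    pthread_join(worker, NULL);

    MPI_Finalize();   /* called by the same thread that initialized MPI */
    return 0;
}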
I have developed a simple MPI program, from a skeleton given to me, in which process 0 sends a message to process 1 and receives a message from process p-1. The code follows.
In the skeleton, the lines
char *message;
message= (char*)malloc(msg_size);
are confusing me. To check the correctness of the program, I am trying to look at the value of the message that is sent or received. Should it be a hexadecimal value?
int main(int argc, char **argv)
{
double startwtime, endwtime;
float elapsed_time, bandwidth;
int my_id, next_id; /* process id-s */
int p; /* number of processes */
char* message; /* storage for the message */
int i, k, max_msgs, msg_size, v;
MPI_Status status; /* return status for receive */
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &my_id );
MPI_Comm_size( MPI_COMM_WORLD, &p );
if (argc < 3)
{
fprintf (stderr, "need msg count and msg size as params\n");
goto EXIT;
}
if ((sscanf (argv[1], "%d", &max_msgs) < 1) ||
(sscanf (argv[2], "%d", &msg_size) < 1))
{
fprintf (stderr, "need msg count and msg size as params\n");
goto EXIT;
}
message = (char*)malloc (msg_size);
if (argc > 3) v=1; else v=0; /*are we in verbose mode*/
/* don't start timer until everybody is ok */
MPI_Barrier(MPI_COMM_WORLD);
int t=0;
if( my_id == 0 ) {
startwtime = MPI_Wtime();
// do max_msgs times:
// send message of size msg_size chars to process 1
// receive message of size msg_size chars from process p-1
while(t<max_msgs) {
MPI_Send((char *) message, msg_size, MPI_CHAR, 1 , 0, MPI_COMM_WORLD);
MPI_Recv((char *) message, msg_size, MPI_CHAR, p-1, 0, MPI_COMM_WORLD, &status);
t++;
}
MPI_Barrier(MPI_COMM_WORLD);
endwtime = MPI_Wtime();
elapsed_time = endwtime-startwtime;
bandwidth = 2.0 * max_msgs * msg_size / (elapsed_time);
printf("Number, size of messages: %3d , %3d \n", max_msgs, msg_size);
fflush(stdout);
printf("Wallclock time = %f seconds\n", elapsed_time );
fflush(stdout);
printf("Bandwidth = %f bytes per second\n", bandwidth);
fflush(stdout);
} else if( my_id == p-1 ) {
// do max_msgs times:
// receive message of size msg_size from process to the left
// send message of size msg_size to process to the right (p-1 sends to 0)
while(t<max_msgs) {
MPI_Send((char *) message, msg_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
MPI_Recv((char *) message, msg_size, MPI_CHAR, my_id-1, 0, MPI_COMM_WORLD, &status);
t++;
}
} else {
while(t<max_msgs) {
MPI_Send((char *) message, msg_size, MPI_CHAR, my_id+1, 0, MPI_COMM_WORLD);
MPI_Recv((char *) message, msg_size, MPI_CHAR, my_id-1, 0, MPI_COMM_WORLD, &status);
t++;
}
}
MPI_Barrier(MPI_COMM_WORLD);
EXIT:
MPI_Finalize();
return 0;
}
I am not completely sure if this is what you mean, but I will try.
From what I understand, you want to know what the message being sent actually is. In the code you provided, memory is assigned to the message, but no real "readable" message is ever specified. This is the line:
message = (char*)malloc (msg_size);
malloc reserves the memory for the message, so anyone can write to it, but it doesn't provide any initial value. Sometimes the memory still contains other information that was previously stored there and freed, so the message being sent is whatever "garbage" was there before. This is probably what you are calling hexadecimal (I hope I understood this right).
The type of value in this case is char (defined as MPI_CHAR in the MPI_Send and MPI_Recv functions). Here you can find more data types for MPI.
I would suggest assigning a value to the message using my_id and next_id, so you know who is sending to whom.
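For example, a sketch of what could go right after the malloc in the skeleton (it assumes msg_size is at least 1; snprintf truncates safely if the buffer is smaller than the string):
message = (char *) malloc(msg_size);
if (message == NULL) {
    fprintf(stderr, "could not allocate %d bytes\n", msg_size);
    MPI_Abort(MPI_COMM_WORLD, 1);
}
/* make the contents readable: record who sends to whom (the next rank in the ring) */
snprintf(message, msg_size, "from rank %d to rank %d", my_id, (my_id + 1) % p);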
My program runs and then crashes at some point. After scouring the code, I've come to the conclusion that I don't know enough to figure out why. Can someone offer some help? Below is main(); I'd be happy to post other source files if you ask, I just didn't want to post too much.
Thanks, Scott
int main(int argc, char *argv[])
{
//Global data goes here
int rank, nprocs, i, j, k, rc, chunkSize;
double start, finish, difference;
MPI_Status status;
int *masterArray;
int *slaveArray;
int *subArray;
//Holder for subArrays for reassembly of subArrays
int **arrayOfArrays;
//Beginning and ARRAYSIZE indices of array
Range range;
//Begin execution
//printf("%s", "Entering main()\n");
MPI_Init(&argc, &argv); /* START MPI */
/* DETERMINE RANK OF THIS PROCESSOR */
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//printf("My rank %d\n", rank);
/* DETERMINE TOTAL NUMBER OF PROCESSORS */
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
//printf("Number of processes %d\n", nprocs);
//Compute chunk size
chunkSize = computeChunkSize(ARRAYSIZE, nprocs);
//debug("%s: %d\n", "Chunk size", chunkSize);
// N/#processes
slaveArray = (int *)malloc(sizeof(int) * (chunkSize+1));
//An array of int arrays (a pointer to pointers to ints)
arrayOfArrays = (int **)malloc(sizeof(int *) * (nprocs-1));
/****************************************************************
****************************************************************
************************ MASTER id == 0 ************************
****************************************************************
***************************************************************/
/* MASTER: rank is 0. Problem decomposition- here simple matter of splitting
the master array evenly across the number of worker bees */
if(rank == MASTER)
{
debug("%s", "Entering MASTER process\n");
//Begin timing the runtime of this application
start = MPI_Wtime();
debug("%s: %lg\n", "Start time", start);
//Seed the random number generator
srand(time(NULL));
//Create random array of ints for mpi processing
masterArray = createRandomArray();
debug("%s %d %s %d %s\n", "Master array of random integers from ", BEGIN, " to ", ARRAYSIZE-1, "\n");
/*Create the subArray to be sent to the slaves- malloc returns a pointer
to void, so explicitly coerce the pointer into the desired type with a cast */
subArray = (int *)malloc(sizeof(int) * (chunkSize+1));
//Initalize range
range = (Range){.begin = 0, .end = (ARRAYSIZE/(nprocs-1))};
debug("%s %d %s %d\n", "Range: ", range.begin, " to ", range.end);
//Master decomposes the problem set: begin and end of each subArray sent to slaves
for(i = 1;i < nprocs; i++)
{
//printf("%s", "Inside loop for Master send\n");
range = decomposeProblem(range.begin, range.end, ARRAYSIZE, nprocs, i);
debug("%s %d to %d%s", "Range from decomposition", range.begin, range.end, "\n");
//Index for subArray
k = 0;
//Transfer the slice of the master array to the subArray
for(j = range.begin; j < range.end; j++)
{
subArray[k] = masterArray[j];
//printf("%d\t", subArray[k]);
k++;
}
//printf("%s", "\n");
//Show sub array contents
debug("%s", "Showing subArray before master sends...\n");
showArray(subArray, 0, k);
//printf("%s %d%s", "Send to slave", i, " from master \n");
debug("%s %d%s", "Send to slave", i, " from master \n");
/***************************************************************
****************************************************************
************************ MASTER: SEND **************************
****************************************************************
***************************************************************/
//MPI_Send(buffer,count,type,dest,tag,comm)
rc = MPI_Send(&subArray, chunkSize, MPI_INT, i, 0, MPI_COMM_WORLD);
}
//Blocks until the slaves finish their work and start sending results back to master
/*MPI_Recv is "blocking" in the sense that when the process (in this case
my_rank == 0) reaches the MPI_Recv statement, it will wait until it
actually receives the message (another process sends it). If the other process
is not ready to Send, then the process running on my_rank == 0 will simply
remain idle. If the message is never sent, my_rank == 0 will wait a very long time!*/
for(i = 1;i < nprocs; i++)
{
debug("%s %d%s ", "Receive from slave", i, " to master\n");
/***************************************************************
****************************************************************
************************ MASTER: RECEIVE ***********************
****************************************************************
***************************************************************/
debug("Rank %d approaching master MPI_Probe.\n", rank);
// Probe for an incoming message from process zero
MPI_Probe(rank, 0, MPI_COMM_WORLD, &status);
debug("Rank %d going by MPI_Probe.\n", rank);
// When probe returns, the status object has the size and other
// attributes of the incoming message. Get the size of the message
MPI_Get_count(&status, MPI_INT, &chunkSize);
rc = MPI_Recv(&slaveArray, chunkSize, MPI_INT, i, 0, MPI_COMM_WORLD, &status);
debug("Slave %d dynamically received %d numbers from 0.\n", rank, chunkSize);
//Store subArray in 2D array
debug("%s", "Storing subArray in 2DArray...\n");
arrayOfArrays[i-1] = slaveArray;
}
//rebuild entire sorted array from sorted subarrays
reconstructArray(arrayOfArrays);
//starting with smallest value, validate that each element is <= next element
validateArray(arrayOfArrays);
//Finish timing the runtime of this application
finish = MPI_Wtime();
//Compute the runtime
difference = finish-start;
//Inform user
debug("%s", "Exiting MASTER process\n");
debug("%s %lg", "Time for completion:", difference);
}
/****************************************************************
****************************************************************
************************* End MASTER ***************************
****************************************************************
***************************************************************/
/****************************************************************
****************************************************************
************************ SLAVE id > 1 **************************
****************************************************************
***************************************************************/
else
{
debug("%s", "Entering SLAVE process\n");
//by process id
debug("%s %d%s", "Receive in slave", rank, " from master \n");
debug("Rank %d approaching Slave MPI_Probe.\n", rank);
// Probe for an incoming message from process zero
MPI_Probe(MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
debug("Rank %d going by Slave MPI_Probe.\n", rank);
// When probe returns, the status object has the size and other
// attributes of the incoming message. Get the size of the message
MPI_Get_count(&status, MPI_INT, &chunkSize);
debug("Count %d and chunkSize %d after Slave MPI_Get_count.\n", rank, chunkSize);
/***************************************************************
***************************************************************
******************** SLAVE: RECEIVE ***************************
***************************************************************
***************************************************************/
rc = MPI_Recv(&subArray, chunkSize, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
debug("%d dynamically received %d numbers from 0.\n", rank, chunkSize);
/*Store the received subArray in the slaveArray for processing and sending back
to master*/
slaveArray = subArray;
//Take a look at incoming subArray: size = N/#processes)
debug("%s ", "Show the slaveArray contents in slave receive\n");
debug("Before bubblesort: start %d, finish: %d\n", (rank-1) * chunkSize, rank * chunkSize);
//showArray(slaveArray, (rank-1) * chunkSize, rank * chunkSize);
//Running the actual sorting algorithm on the current slaves subArray
//bubble(slaveArray, ARRAYSIZE);
//Return sorted subArray back to the master by process id
debug("%s %d%s", "Send from slave", i, " to master \n");
/***************************************************************
****************************************************************
************************ SLAVE: SEND ***************************
****************************************************************
***************************************************************/
//MPI_Send(buffer,count,type,dest,tag,comm)
rc = MPI_Send(&slaveArray, chunkSize, MPI_INT, 0, 0, MPI_COMM_WORLD);
debug("%s", "Exiting SLAVE process\n");
}
/****************************************************************
****************************************************************
************************* END SLAVE ****************************
****************************************************************
***************************************************************/
//Clean up memory
//free(subArray);
//free(masterArray);
//free(slaveArray);
//free(arrayOfArrays);
rc = MPI_Get_count(&status, MPI_INT, &chunkSize);
debug("Process %d: received %d int(s) from process %d with tag %d \n", rank, chunkSize, status.MPI_SOURCE, status.MPI_TAG);
/* EXIT MPI */
MPI_Finalize();
debug("%s", "Exiting main()\n");
return 0;
}
Check that chunkSize >= 0, nProcs >= 2, and that malloc does not return null. I mean, add code to do this every time and for every malloc, and exit if these conditions are not true -- not just put in temporary debugging.
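For example, a sketch of those checks using the names from the posted main():
if (nprocs < 2 || chunkSize < 0) {
    fprintf(stderr, "bad setup: nprocs=%d chunkSize=%d\n", nprocs, chunkSize);
    MPI_Abort(MPI_COMM_WORLD, 1);
}
slaveArray = (int *)malloc(sizeof(int) * (chunkSize + 1));
if (slaveArray == NULL) {
    fprintf(stderr, "malloc failed for slaveArray (%d ints)\n", chunkSize + 1);
    MPI_Abort(MPI_COMM_WORLD, 1);
}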
This loop might overflow bounds:
for(j = range.begin; j < range.end; j++)
{
subArray[k] = masterArray[j];
k++;
}
You didn't show the code where masterArray is allocated (and you didn't pass nprocs to that function either, so how can its size match up with ARRAYSIZE/(nprocs-1)?).
Also, subArray has chunkSize+1 elements, but range.end is defined as ARRAYSIZE/(nprocs-1). Based on the code you've shown (which doesn't include ARRAYSIZE, nor how chunkSize and nprocs are actually calculated), there's no reason to believe that we will always have ARRAYSIZE/(nprocs-1) <= chunkSize+1.
To avoid random segfaults, you should always, always check that an array index is within the bounds of the array before using the [] operator.
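A sketch of such a guard applied to the copy loop above, assuming masterArray holds ARRAYSIZE ints and subArray holds chunkSize+1 ints as in the posted code:
for (j = range.begin; j < range.end; j++)
{
    /* refuse to index outside either array instead of segfaulting at random */
    if (j < 0 || j >= ARRAYSIZE || k > chunkSize) {
        fprintf(stderr, "index out of bounds: j=%d k=%d\n", j, k);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    subArray[k] = masterArray[j];
    k++;
}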
OK, maybe it'd be easier to show specific spots in the code to help me figure this out. I tried to create a function that creates an int * array, passed in by reference, and tests whether the array is null and whether it is the size I want it to be. Below it is the caller. One thing I noticed is that the sizeof(buffer) call doesn't return what I thought it would, so how else can I make that check? Also, the caller, createRandomArray, is called with an int * passed into it. Can you pass by reference as deep as you want? Am I using the correct syntax to make sure that masterArray gets populated in the caller (main()) with call by reference?
void safeMalloc(int *buffer, int size, int line_num)
{
buffer = (int *)malloc(sizeof(int) * size);
//Test that malloc allocated at least some memory
if(buffer == NULL)
{
debug("ERROR: cannot allocate any memory for line %d\n", line_num);
perror(NULL);
exit(EXIT_FAILURE);
}
else
debug("Successfully created the array through malloc()\n");
//Test that malloc allocated the correct amount of memory
if(sizeof(buffer) != size)
{
debug("ERROR: Created %d bytes array instead of %d bytes through malloc() on line %d.\n", sizeof(buffer), size, line_num);
perror(NULL);
exit(EXIT_FAILURE);
}
}
void createRandomArray(int *masterArray)
{
int i;
debug("Entering createRandomArray()\n");
safeMalloc(masterArray, ARRAYSIZE, 21);
for(i = BEGIN;i < ARRAYSIZE;i++)
{
masterArray[i] = (rand() % (ARRAYSIZE - BEGIN)) + BEGIN;
debug("%d ", masterArray[i]);
}
debug("\n");
debug("\n Exiting createRandomArray()\n");
}
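One way to make that work (a sketch, not from the thread; debug, ARRAYSIZE, and BEGIN are taken from the posted code) is to pass the pointer by address, i.e. an int **, so the allocation done inside safeMalloc is visible to the caller. Note that sizeof(buffer) inside the function only reports the size of the pointer itself (typically 8 bytes), so checking the result against NULL is the only test malloc gives you:
void safeMalloc(int **buffer, int size, int line_num)
{
    *buffer = (int *)malloc(sizeof(int) * size);
    if (*buffer == NULL)
    {
        debug("ERROR: cannot allocate any memory for line %d\n", line_num);
        perror(NULL);
        exit(EXIT_FAILURE);
    }
    /* there is no portable way to ask malloc how much it really allocated;
       sizeof(*buffer) here is just sizeof(int *) */
}

void createRandomArray(int **masterArray)
{
    safeMalloc(masterArray, ARRAYSIZE, 21);
    for (int i = BEGIN; i < ARRAYSIZE; i++)
        (*masterArray)[i] = (rand() % (ARRAYSIZE - BEGIN)) + BEGIN;
}

/* and in main(): createRandomArray(&masterArray); */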
I am working on a project that converts point-to-point communication to collective communication.
Essentially, what I would like to do is use MPI_Scatterv instead of MPI_Send and MPI_Recv. What I am having trouble determining is the correct arguments for Scatterv.
Here is the function that I am working in:
void read_block_vector (
char *s, /* IN - File name */
void **v, /* OUT - Subvector */
MPI_Datatype dtype, /* IN - Element type */
int *n, /* OUT - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int datum_size; /* Bytes per element */
int i;
FILE *infileptr; /* Input file pointer */
int local_els; /* Elements on this proc */
MPI_Status status; /* Result of receive */
int id; /* Process rank */
int p; /* Number of processes */
int x; /* Result of read */
datum_size = get_size (dtype);
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
/* Process p-1 opens file, determines number of vector
elements, and broadcasts this value to the other
processes. */
if (id == (p-1)) {
infileptr = fopen (s, "r");
if (infileptr == NULL) *n = 0;
else fread (n, sizeof(int), 1, infileptr);
}
MPI_Bcast (n, 1, MPI_INT, p-1, comm);
if (! *n) {
if (!id) {
printf ("Input file '%s' cannot be opened\n", s);
fflush (stdout);
}
}
/* Block mapping of vector elements to processes */
local_els = BLOCK_SIZE(id,p,*n);
/* Dynamically allocate vector. */
*v = my_malloc (id, local_els * datum_size);
if (id == (p-1)) {
for (i = 0; i < p-1; i++) {
x = fread (*v, datum_size, BLOCK_SIZE(i,p,*n),
infileptr);
MPI_Send (*v, BLOCK_SIZE(i,p,*n), dtype, i, DATA_MSG,
comm);
}
x = fread (*v, datum_size, BLOCK_SIZE(id,p,*n),
infileptr);
fclose (infileptr);
} else {
MPI_Recv (*v, BLOCK_SIZE(id,p,*n), dtype, p-1, DATA_MSG,
comm, &status);
}
// My Attempt at making this collective communication:
if(id == (p-1))
{
x = fread(*v,datum_size,*n,infileptr);
for(i = 0; i < p; i++)
{
size[i] = BLOCK_SIZE(i,p,*n);
}
//x = fread(*v,datum_size,BLOCK_SIZE(id, p, *n),infileptr);
fclose(infileptr);
}
MPI_Scatterv(v,send_count,send_disp, dtype, storage, size[id], dtype, p-1, comm);
}
Any help would be appreciated.
Thank you
It's easier for people to answer your question if you post a small, self-contained, reproducible example.
For the Scatterv, you need to provide the list of counts to send to each process, which appears to be your size[] array, and the displacements within the data to send out. The mechanics of Scatter vs Scatterv are described in some detail in this answer. Trying to infer what all your variables and un-supplied functions/macros do, the example below scatters a file out to the processes.
But also note that if you're doing this, it's not much harder to actually use MPI-IO to coordinate the file access directly, avoiding the need to have one process read all of the data in the first place. Code for that is also supplied.
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char **argv) {
int id, p;
int *block_size;
int datasize = 0;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
block_size = malloc(p * sizeof(int));
for (int i=0; i<p; i++) {
block_size[i] = i + 1;
datasize += block_size[i];
}
/* create file for reading */
if (id == p-1) {
char *data = malloc(datasize * sizeof(char));
for (int i=0; i<datasize; i++)
data[i] = 'a' + i;
FILE *f = fopen("data.dat","wb");
fwrite(data, sizeof(char), datasize, f);
fclose(f);
printf("Initial data: ");
for (int i=0; i<datasize; i++)
printf("%c", data[i]);
printf("\n");
free(data);
}
if (id == 0) printf("---Using MPI-Scatterv---\n");
/* using scatterv */
int local_els = block_size[id];
char *v = malloc ((local_els + 1) * sizeof(char));
char *all;
int *counts, *disps;
counts = malloc(p * sizeof(int));
disps = malloc(p * sizeof(int));
/* counts.. */
for(int i = 0; i < p; i++)
counts[i] = block_size[i];
/* and displacements (where the data starts within the send buffer) */
disps[0] = 0;
for(int i = 1; i < p; i++)
disps[i] = disps[i-1] + counts[i-1];
if(id == (p-1))
{
all = malloc(datasize*sizeof(char));
FILE *f = fopen("data.dat","rb");
int x = fread(all,sizeof(char),datasize,f);
fclose(f);
}
MPI_Scatterv(all, counts, disps, MPI_CHAR, v, local_els, MPI_CHAR, p-1, MPI_COMM_WORLD);
if (id == (p-1)) {
free(all);
}
v[local_els] = '\0';
printf("[%d]: %s\n", id, v);
/* using MPI I/O */
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD); /* only for syncing output to screen */
if (id == 0) printf("---Using MPI-IO---\n");
for (int i=0; i<local_els; i++)
v[i] = 'X';
/* create the file layout - the subarrays within the 1d array of data */
MPI_Datatype myview;
MPI_Type_create_subarray(1, &datasize, &local_els, &(disps[id]),
MPI_ORDER_C, MPI_CHAR, &myview);
MPI_Type_commit(&myview);
MPI_File mpif;
MPI_Status status;
MPI_File_open(MPI_COMM_WORLD, "data.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &mpif);
MPI_File_set_view(mpif, (MPI_Offset)0, MPI_CHAR, myview, "native", MPI_INFO_NULL);
MPI_File_read_all(mpif, v, local_els, MPI_CHAR, &status);
MPI_File_close(&mpif);
MPI_Type_free(&myview);
v[local_els] = '\0';
printf("[%d]: %s\n", id, v);
free(v);
free(counts);
free(disps);
MPI_Finalize();
return 0;
}
Running this gives (output re-ordered for clarity)
$ mpirun -np 6 ./foo
Initial data: abcdefghijklmnopqrstu
---Using MPI-Scatterv---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu
---Using MPI-IO---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu