I want to send a struct that contains some variables and a dynamically allocated array.
I read that it's not possible to send everything at once because of the dynamic array: I should first send a message with the other variables and then a second message with the dynamic array.
Because of that, I thought I could instead send a copy of the struct's contents packed into a dynamically allocated array of BYTE (see the code for details).
I wrote the following code.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
// I read MPI_BYTE is an unsigned char
typedef unsigned char BYTE;
typedef struct Message
{
int id;
int detectNr;
char *detection;
} Msg;
int main() {
int size, rank;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
printf("Invalid number of processes.\n");
return -1;
}
// initialize an empty message
Msg msg = {.id = 1, .detectNr = 0, .detection = NULL};
// here we should take the measurements
// for now suppose there are 10
msg.detectNr = 10;
msg.detection = malloc(sizeof(char) * msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
if(rank != 0) {
// put the data inside a buffer of BYTE
int bufferSize = sizeof(int) + sizeof(int) + sizeof(char) * msg.detectNr;
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg.id, sizeof(int));
memcpy(buffer + sizeof(int), &msg.detectNr, sizeof(int));
memcpy(buffer + (2 * sizeof(int)), &msg.detection, sizeof(char) * msg.detectNr);
// send buffer to process 0
MPI_Send(buffer, bufferSize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
free(buffer);
free(msg.detection);
} else {
for (int i = 1; i < size; i++) {
int bufferSize;
BYTE *buffer;
MPI_Status status;
// initialize an empty message
Msg rcv= {.id = 0, .detectNr = 0, .detection = NULL};
// probe for an incoming message from process zero
MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
// when probe returns, the status object has the size and other
// attributes of the incoming message
// get the message size
MPI_Get_count(&status, MPI_BYTE, &bufferSize);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// allocate a buffer to hold the incoming data
buffer = malloc(sizeof(BYTE) * bufferSize);
// now receive the message with the allocated buffer
MPI_Recv(buffer, bufferSize, MPI_BYTE, i, 0, MPI_COMM_WORLD, &status);
// copy the data from the buffer to the message
memcpy(&rcv.id, buffer, sizeof(int));
memcpy(&rcv.detectNr, buffer + sizeof(int), sizeof(int));
memcpy(&rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);
printf("Process %d: id: %d\n", rank, rcv.id);
printf("Process %d: detectNr: %d\n", rank, rcv.detectNr);
printf("Process %d: detection: %s\n", rank, rcv.detection);
free(rcv.detection);
free(buffer);
}
}
MPI_Finalize();
return 0;
}
Unfortunately it doesn't work; here is the output:
Process 0: buffer size: 18
Process 1: id: 1
Process 1: detectNr: 10
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
It looks like, and probably is the case, that the last part of the buffer used for receiving is empty.
I don't understand why this happens: I'm not trying to send the original detection array; instead I'm allocating an entirely new dynamic array and copying all the values into it.
Can you help me solve the problem, or explain why it doesn't work?
In case it helps: I'm developing in C, under Ubuntu, with VSCode as editor and gcc as compiler.
OK, basically the problem wasn't MPI but my memory handling, so my fault.
On the send side I was copying from &msg.detection (the address of the pointer) instead of from msg.detection (the data it points to), and on the receive side I was copying into &rcv.detection without ever allocating memory for the array.
Here is the corrected code:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
// I read MPI_BYTE is an unsigned char
typedef unsigned char BYTE;
typedef struct Message
{
int id;
int detectNr;
char *detection;
} Msg;
int main() {
int size, rank;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
printf("Invalid number of processes.\n");
return -1;
}
if(rank != 0) {
// initialize an empty message
Msg msg = {.id = 1, .detectNr = 0, .detection = NULL};
// here we should take the measurements
// for now suppose there are 10
msg.detectNr = 10;
msg.detection = malloc(sizeof(char) * msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
printf("Process %d: id: %d\n", rank, msg.id);
printf("Process %d: detectNr: %d\n", rank, msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
printf("Process %d: msg.detect[%d]: %c\n", rank, i, msg.detection[i]);
// put the data inside a buffer of BYTE
int bufferSize = sizeof(int) + sizeof(int) + sizeof(char) * msg.detectNr;
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg.id, sizeof(int));
memcpy(buffer + sizeof(int), &msg.detectNr, sizeof(int));
memcpy(buffer + (2 * sizeof(int)), msg.detection, sizeof(char) * msg.detectNr);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// send buffer to process 0
MPI_Send(buffer, bufferSize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
free(buffer);
free(msg.detection);
} else {
for (int i = 1; i < size; i++) {
int bufferSize;
BYTE *buffer;
MPI_Status status;
// initialize an empty message
Msg rcv = {.id = 0, .detectNr = 0, .detection = NULL};
// probe for an incoming message from process zero
MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
// when probe returns, the status object has the size and other
// attributes of the incoming message
// get the message size
MPI_Get_count(&status, MPI_BYTE, &bufferSize);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// allocate a buffer to hold the incoming data
buffer = malloc(sizeof(BYTE) * bufferSize);
// now receive the message with the allocated buffer
MPI_Recv(buffer, bufferSize, MPI_BYTE, i, 0, MPI_COMM_WORLD, &status);
// copy the data from the buffer to the message
memcpy(&rcv.id, buffer, sizeof(int));
memcpy(&rcv.detectNr, buffer + sizeof(int), sizeof(int));
rcv.detection = malloc(sizeof(char) * rcv.detectNr);
memcpy(rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);
printf("Process %d: id: %d\n", rank, rcv.id);
printf("Process %d: detectNr: %d\n", rank, rcv.detectNr);
for (int j = 0; j < rcv.detectNr; j++)
printf("Process %d: rcv.detect[%d]: %c\n", rank, j, rcv.detection[j]);
free(rcv.detection);
free(buffer);
}
}
MPI_Finalize();
return 0;
}
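As an aside, and not part of the fix above: MPI also provides MPI_Pack/MPI_Unpack for exactly this kind of manual serialization, so the byte offsets don't have to be computed by hand. A minimal sketch of the sender side, assuming the same Msg struct and BYTE typedef as above:
// Sketch only: pack id, detectNr and the detection array into one buffer.
int intsSize, charsSize, bufferSize, position = 0;
MPI_Pack_size(2, MPI_INT, MPI_COMM_WORLD, &intsSize);
MPI_Pack_size(msg.detectNr, MPI_CHAR, MPI_COMM_WORLD, &charsSize);
bufferSize = intsSize + charsSize;
BYTE *buffer = malloc(bufferSize);
MPI_Pack(&msg.id, 1, MPI_INT, buffer, bufferSize, &position, MPI_COMM_WORLD);
MPI_Pack(&msg.detectNr, 1, MPI_INT, buffer, bufferSize, &position, MPI_COMM_WORLD);
MPI_Pack(msg.detection, msg.detectNr, MPI_CHAR, buffer, bufferSize, &position, MPI_COMM_WORLD);
MPI_Send(buffer, position, MPI_PACKED, 0, 0, MPI_COMM_WORLD);
free(buffer);
// On the receiver, MPI_Probe/MPI_Get_count with MPI_PACKED gives the buffer
// size; MPI_Unpack then extracts the two ints, after which rcv.detection can
// be allocated and the chars unpacked into it.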
First, I should mention that I am French and my English is not very good.
I am working on an MPI application, I have some problems, and I hope somebody can help me.
As the title of my post says, I am trying to use a thread to listen for when I have to kill my application and then call MPI_Finalize.
However, my application does not finish correctly.
More precisely, I obtain the following message:
[XPS-2720:27441] *** Process received signal ***
[XPS-2720:27441] Signal: Segmentation fault (11)
[XPS-2720:27441] Signal code: Address not mapped (1)
[XPS-2720:27441] Failing at address: 0x7f14077a3b6d
[XPS-2720:27440] *** Process received signal ***
[XPS-2720:27440] Signal: Segmentation fault (11)
[XPS-2720:27440] Signal code: Address not mapped (1)
[XPS-2720:27440] Failing at address: 0x7fb11d07bb6d
mpirun noticed that process rank 1 with PID 27440 on node lagniez-XPS-2720 exited on signal 11 (Segmentation fault).
My slave code is:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <pthread.h>
#include <cassert>
#define send_data_tag 1664
#define send_kill_tag 666
void *finilizeMPICom(void *intercomm)
{
printf("the finilizeMPICom was called\n");
MPI_Comm parentcomm = * ((MPI_Comm *) intercomm);
MPI_Status status;
int res;
// sleep(10);
MPI_Recv(&res, 1, MPI_INT, 0, send_kill_tag, parentcomm, &status);
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("we receive something %d -- %d\n", rank, res);
MPI_Finalize();
exit(0);
}// finilizeMPICom
int main( int argc, char *argv[])
{
int numtasks, rank, len, rc;
char hostname[MPI_MAX_PROCESSOR_NAME];
int provided, claimed;
rc = MPI_Init_thread(0, 0, MPI_THREAD_MULTIPLE, &provided);
MPI_Query_thread( &claimed );
if (rc != MPI_SUCCESS || provided != 3)
{
printf ("Error starting MPI program. Terminating.\n");
MPI_Abort(MPI_COMM_WORLD, rc);
}
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm parentcomm;
MPI_Comm_get_parent(&parentcomm);
/* create a second thread to listen when we have to kill the program */
pthread_t properlyKill;
if(pthread_create(&properlyKill, NULL, finilizeMPICom, (void *) &parentcomm))
{
fprintf(stderr, "Error creating thread\n");
return 0;
}
assert(parentcomm != MPI_COMM_NULL);
MPI_Status status;
int root_process, ierr, num_rows_to_receive;
int mode;
MPI_Recv( &mode, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
printf("c The solver works in the mode %d\n", mode);
printf("I sent a message %d\n", rank);
// if(rank != 1) sleep(100);
int res = 1;
MPI_Send(&res, 1, MPI_INT, 0, send_data_tag, parentcomm);
printf("we want to listen for somethiing %d\n", rank);
int rescc = 1;
MPI_Recv(&rescc, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
printf("I received the message %d %d\n", rescc, rank);
if(rescc == 1000)
{
printf("~~~~~~~~>>> I print the solution %d\n", rank);
int res3 = 1001;
MPI_Send(&res3, 1, MPI_INT, 0, send_data_tag, parentcomm);
}
else printf("I do not understand %d\n", rank);
printf("I wait the thread to kill the programm %d\n", rank);
pthread_join(properlyKill, (void**)&(res));
return 0;
}
For the master I have:
int main(int argc, char **argv)
{
Parser *p = new Parser("slave.xml");
MPI_Init(&argc, &argv);
if(p->method == "concurrent")
{
ConcurrentManager cc(p->instance, p->solvers);
cc.run();
}
else
{
cerr << "c The only available methods are: concurrent, eps (Embarrassingly Parallel Search) or tree" << endl;
exit(1);
}
delete(p);
MPI_Finalize();
exit(0);
}// main
/**
Create a concurrent manager (means init the data structures to run
the solvers).
#param[in] _instance, the benchmark path
#param[in] _solvers, the set of solvers that will be ran
*/
ConcurrentManager::ConcurrentManager(string _instance, vector<Solver> &_solvers) :
instance(_instance), solvers(_solvers)
{
cout << "c\nc Concurrent manager called" << endl;
nbSolvers = _solvers.size();
np = new int[nbSolvers];
cmds = new char*[nbSolvers];
arrayOfArgs = new char **[nbSolvers];
infos = new MPI_Info[nbSolvers];
for(int i = 0 ; i<nbSolvers ; i++)
{
np[i] = solvers[i].npernode;
cmds[i] = new char[(solvers[i].executablePath).size() + 1];
strcpy(cmds[i], (solvers[i].executablePath).c_str());
arrayOfArgs[i] = new char *[(solvers[i].options).size() + 1];
for(unsigned int j = 0 ; j<(solvers[i].options).size() ; j++)
{
arrayOfArgs[i][j] = new char[(solvers[i].options[j]).size() + 1];
strcpy(arrayOfArgs[i][j], (solvers[i].options[j]).c_str());
}
arrayOfArgs[i][(solvers[i].options).size()] = NULL;
MPI_Info_create(&infos[i]);
char hostname[solvers[i].hostname.size()];
strcpy(hostname, solvers[i].hostname.c_str());
MPI_Info_set(infos[i], "host", hostname);
}
sizeComm = 0;
}// constructor
/**
Wait that at least one process finish and return the code
SOLUTION_FOUND.
#param[in] intercomm, the communicator
*/
void ConcurrentManager::waitForSolution(MPI_Comm &intercomm)
{
MPI_Status arrayStatus[sizeComm], status;
MPI_Request request[sizeComm];
int val[sizeComm], flag;
for(int i = 0 ; i<sizeComm ; i++) MPI_Irecv(&val[i], 1, MPI_INT, i, TAG_MSG, intercomm, &request[i]);
bool solutionFound = false;
while(!solutionFound)
{
for(int i = 0 ; i<sizeComm ; i++)
{
MPI_Test(&request[i], &flag, &arrayStatus[i]);
if(flag)
{
printf("---------------------> %d reveived %d\n", i , val[i]);
if(val[i] == SOLUTION_FOUND)
{
int msg = PRINT_SOLUTION;
MPI_Send(&msg, 1, MPI_INT, i, TAG_MSG, intercomm); // ask to print the solution
int msgJobFinished;
MPI_Recv(&msgJobFinished, 1, MPI_INT, i, TAG_MSG, intercomm, &status); // wait the answer
assert(msgJobFinished == JOB_FINISHED);
cout << "I am going to kill everybody" << endl;
int msgKill[sizeComm];
for(int j = 0 ; j<sizeComm ; j++)
{
msgKill[i] = STOP_AT_ONCE;
MPI_Send(&msgKill[i], 1, MPI_INT, j, TAG_KILL, intercomm);
}
solutionFound = true;
break;
} else
{
printf("restart the communication for %d\n", i);
MPI_Irecv(&val[i], 1, MPI_INT, i, TAG_MSG, intercomm, &request[i]);
}
}
}
}
}// waitForSolution
/**
Run the solver.
*/
void ConcurrentManager::run()
{
MPI_Comm intercomm;
int errcodes[solvers.size()];
MPI_Comm_spawn_multiple(nbSolvers, cmds, arrayOfArgs, np, infos, 0, MPI_COMM_WORLD, &intercomm, errcodes);
MPI_Comm_remote_size(intercomm, &sizeComm);
cout << "c Solvers are now running: " << sizeComm << endl;
int msg = CONCU_MODE;
for(int i = 0 ; i<sizeComm ; i++) MPI_Send(&msg, 1, MPI_INT, i, TAG_MSG, intercomm); // init the working mode
waitForSolution(intercomm);
}// run
I know that I put a lot of code :(
But I do not know where the problem is.
Please, help me :)
Best regards.
The MPI documentation on how MPI interacts with threads demands that the call to MPI_Finalize() be performed by the main thread -- that is, the same one that initialized MPI. In your case, that also happens to be your process's initial thread.
In order to satisfy MPI's requirements, you could reorganize your application so that the initial thread is the one that waits for a kill signal and then shuts down MPI. The other work it currently does would then need to be moved to a different thread.
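A minimal sketch of that reorganization, assuming the tags, includes and parentcomm handling from your slave code (the actual solver protocol is elided, so this is not a drop-in replacement):
/* Sketch: the initial thread, which called MPI_Init_thread, is the one that
 * waits for the kill message and calls MPI_Finalize; the normal work runs in
 * a secondary thread instead. */
void *solverWork(void *intercomm)
{
    MPI_Comm parentcomm = *((MPI_Comm *) intercomm);
    MPI_Status status;
    int mode;
    MPI_Recv(&mode, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
    /* ... the rest of the existing send/recv protocol goes here ... */
    return NULL;
}

int main(int argc, char *argv[])
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm parentcomm;
    MPI_Comm_get_parent(&parentcomm);
    /* hand the ordinary work to a secondary thread */
    pthread_t worker;
    pthread_create(&worker, NULL, solverWork, (void *) &parentcomm);
    /* the initial thread blocks here until the master sends the kill tag */
    int res;
    MPI_Status status;
    MPI_Recv(&res, 1, MPI_INT, 0, send_kill_tag, parentcomm, &status);
    /* join the worker (assuming its protocol has completed by now), then
     * finalize from the same thread that initialized MPI */
    pthread_join(worker, NULL);
    MPI_Finalize();
    return 0;
}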
I have the following code written in C with MPI:
#include <mpi.h>
#include <stdio.h>
int main(int argc, char *argv[])
{
int size, rank;
MPI_Status status;
int buf[1000];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
int i = 0;
while (i != 1000) {
buf[i] = i;
i++;
}
MPI_Send(buf, 999, MPI_INT, 1, 1, MPI_COMM_WORLD);
printf("msg has been sent\n");
}
if (rank == 1) {
int sz = sizeof(buf);
int lst = buf[sz-1];
MPI_Recv(buf, 999, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
printf("la taille du buf %d et dernier %d", sz, lst);
}
MPI_Finalize();
}
And after running it, it gives this message:
msg has been sent
[blitzkrieg-TravelMate-P253:03395] *** Process received signal ***
[blitzkrieg-TravelMate-P253:03395] Signal: Segmentation fault (11)
[blitzkrieg-TravelMate-P253:03395] Signal code: Address not mapped (1)
[blitzkrieg-TravelMate-P253:03395] Failing at address: 0xbfee8574
[blitzkrieg-TravelMate-P253:03395] [0] [0xb772d40c]
[blitzkrieg-TravelMate-P253:03395] [1] mpii(main+0x12f) [0x8048883]
[blitzkrieg-TravelMate-P253:03395] [2] /lib/i386-linux-gnu/libc.so.6(__libc_start_main+0xf3) [0xb74c84d3]
[blitzkrieg-TravelMate-P253:03395] [3] mpii() [0x80486c1]
[blitzkrieg-TravelMate-P253:03395] *** End of error message ***
mpirun noticed that process rank 1 with PID 3395 on node blitzkrieg-TravelMate-P253 exited on signal 11 (Segmentation fault).
Any suggestion will help, thanks.
The stack trace shows that the error is not in the MPI_Recv as the question title suggests. The error is actually here:
int sz = sizeof(buf);
int lst = buf[sz-1]; // <---- here
Since buf is an array of int and sizeof(buf) returns its size in bytes, sz is set to 4 times the number of elements in the array (4000 instead of 1000 here). Accessing buf[sz-1] therefore goes way beyond the bounds of buf, into an unmapped memory region above the stack of the process.
You should divide the total size of the array by the size of one of its elements, e.g. the first one:
int sz = sizeof(buf) / sizeof(buf[0]);
int lst = buf[sz-1];
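As a side note, not part of the fix above: if the goal was to look at the last element that actually arrived, the receive status can be queried after MPI_Recv instead of computing the length with sizeof. A sketch, assuming the same buf and status as in the question:
MPI_Recv(buf, 999, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
int received;
MPI_Get_count(&status, MPI_INT, &received);   /* number of ints received */
printf("received %d ints, last = %d\n", received, buf[received - 1]);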
I am reading "Using MPI" and trying to execute the code myself. There is a nonblocking broadcast example in Chapter 6.2. I tried to run it with my own callbacks instead of MPI_NULL_COPY_FN and MPI_NULL_DELETE_FN. Here is my code; it is very similar to the code in the book, but the callbacks are never called, and I am not sure why. There are no warnings or errors when I compile with -Wall. Could you help me, please? Thanks a lot.
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
static int ibcast_keyval=MPI_KEYVAL_INVALID;
typedef struct
{
MPI_Comm comm;
int ordering_tag;
} Ibcast_syshandle;
typedef struct
{
MPI_Request *req_array;
MPI_Status *stat_array;
int num_sends;
int num_recvs;
} Ibcast_handle;
int Ibcast_work(Ibcast_handle *handle)
{
if(handle->num_recvs==0)
MPI_Startall(handle->num_sends, handle->req_array);
else
MPI_Startall(handle->num_recvs, &(handle->req_array[handle->num_sends]));
return MPI_SUCCESS;
}
int Ibcast_copy(MPI_Comm oldcomm, int keyval, void *extra, void *attr_in, void *attr_out, int *flag)
{
Ibcast_syshandle *syshandle=(Ibcast_syshandle *)attr_in;
Ibcast_syshandle *new_syshandle;
printf("keyval=%d\n", keyval);
fflush(stdout);
if((keyval==MPI_KEYVAL_INVALID)||(keyval!=ibcast_keyval)||(syshandle==NULL))
return 1;
new_syshandle=(Ibcast_syshandle *)malloc(sizeof(Ibcast_syshandle));
new_syshandle->ordering_tag=0;
MPI_Comm_dup(syshandle->comm, &(new_syshandle->comm));
{
int rank;
MPI_Comm_rank(new_syshandle->comm, &rank);
printf("Ibcast_copy called from %d\n", rank);
fflush(stdout);
}
*(void **)attr_out=(void *)new_syshandle;
*flag=1;
return MPI_SUCCESS;
}
int Ibcast_delete(MPI_Comm comm, int keyval, void *attr_val, void *extra)
{
Ibcast_syshandle *syshandle=(Ibcast_syshandle *)attr_val;
{
int rank;
MPI_Comm_rank(syshandle->comm, &rank);
printf("Ibcast_delete called from %d\n", rank);
fflush(stdout);
}
if((keyval==MPI_KEYVAL_INVALID)||(keyval!=ibcast_keyval)||(syshandle==NULL))
return 1;
MPI_Comm_free(&(syshandle->comm));
free(syshandle);
return MPI_SUCCESS;
}
int Ibcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm, Ibcast_handle **handle_out)
{
Ibcast_syshandle *syshandle;
Ibcast_handle *handle;
int flag, mask, relrank;
int retn, size, rank;
int req_no=0;
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);
if(size==1)
{
(*handle_out)=NULL;
return MPI_SUCCESS;
}
if(ibcast_keyval==MPI_KEYVAL_INVALID)
// MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &ibcast_keyval, NULL);
MPI_Comm_create_keyval(Ibcast_copy, Ibcast_delete, &ibcast_keyval, NULL);
MPI_Comm_get_attr(comm, ibcast_keyval, (void **)&syshandle, &flag);
if(flag==0)
{
syshandle=(Ibcast_syshandle *)malloc(sizeof(Ibcast_syshandle));
syshandle->ordering_tag=0;
MPI_Comm_dup(comm, &(syshandle->comm));
MPI_Comm_set_attr(comm, ibcast_keyval, (void *)syshandle);
}
handle=(Ibcast_handle *)malloc(sizeof(Ibcast_handle));
handle->num_sends=handle->num_recvs=0;
mask=0x1;
relrank=(rank-root+size)%size;
while((mask&relrank)==0 && mask<size)
{
if((relrank|mask)<size)
++handle->num_sends;
mask<<=1;
}
if(mask<size)
++handle->num_recvs;
handle->req_array=(MPI_Request *)malloc(sizeof(MPI_Request)*(handle->num_sends+handle->num_recvs));
handle->stat_array=(MPI_Status *)malloc(sizeof(MPI_Status)*(handle->num_sends+handle->num_recvs));
mask=0x1;
relrank=(rank-root+size)%size;
while((mask&relrank)==0 && mask<size)
{
if((relrank|mask)<size)
MPI_Send_init(buf, count, datatype, ((relrank|mask)+root)%size, syshandle->ordering_tag, syshandle->comm, &(handle->req_array[req_no++]));
mask<<=1;
}
if(mask<size)
MPI_Recv_init(buf, count, datatype, ((relrank & (~mask))+root)%size, syshandle->ordering_tag, syshandle->comm, &(handle->req_array[req_no++]));
retn=Ibcast_work(handle);
++(syshandle->ordering_tag);
(*handle_out)=handle;
return retn;
}
int Ibcast_wait(Ibcast_handle **handle_out)
{
Ibcast_handle *handle=(*handle_out);
int retn, i;
if(handle==NULL)
return MPI_SUCCESS;
if(handle->num_recvs!=0)
{
MPI_Waitall(handle->num_recvs, &handle->req_array[handle->num_sends], &handle->stat_array[handle->num_sends]);
MPI_Startall(handle->num_sends, handle->req_array);
}
retn=MPI_Waitall(handle->num_sends, handle->req_array, handle->stat_array);
for(i=0; i<(handle->num_sends+handle->num_recvs);i++)
MPI_Request_free(&(handle->req_array[i]));
free(handle->req_array);
free(handle->stat_array);
free(handle);
*handle_out=NULL;
return retn;
}
int main( int argc, char *argv[] )
{
int buf1[10], buf2[20];
int rank, i;
Ibcast_handle *ibcast_handle_1, *ibcast_handle_2;
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
if (rank == 0) {
for (i=0; i<10; i++) buf1[i] = i;
for (i=0; i<20; i++) buf2[i] = -i;
}
Ibcast( buf1, 10, MPI_INT, 0, MPI_COMM_WORLD, &ibcast_handle_1 );
Ibcast( buf2, 20, MPI_INT, 0, MPI_COMM_WORLD, &ibcast_handle_2 );
Ibcast_wait( &ibcast_handle_1 );
Ibcast_wait( &ibcast_handle_2 );
for (i=0; i<10; i++) {
if (buf1[i] != i) printf( "buf1[%d] = %d on %d\n", i, buf1[i], rank );
}
for (i=0; i<20; i++) {
if (buf2[i] != -i) printf( "buf2[%d] = %d on %d\n", i, buf2[i], rank );
}
MPI_Finalize();
return 0;
}
The callback functions are there to copy and delete the created attributes when a communicator is duplicated or deleted, or just when the attribute is deleted. The callback functions are necessary because the attributes can be completely arbitrary.
So here's a stripped down version of your code that does work (creating such a minimal example being a useful way to both track down problems and get help on sites like SO):
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
static int ibcast_keyval;
int Ibcast_copy(MPI_Comm oldcomm, int keyval, void *extra, void *attr_in, void *attr_out, int *flag)
{
printf("In ibcast_copy: keyval = %d\n", keyval);
*flag = 1;
return MPI_SUCCESS;
}
int Ibcast_delete(MPI_Comm comm, int keyval, void *attr_val, void *extra)
{
printf("In ibcast_delete: keyval = %d\n", keyval);
return MPI_SUCCESS;
}
int main( int argc, char *argv[] )
{
int rank, i;
int attr=2;
MPI_Init( &argc, &argv );
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm duped_comm;
MPI_Comm_create_keyval(Ibcast_copy, Ibcast_delete, &ibcast_keyval, NULL);
MPI_Comm_set_attr( MPI_COMM_WORLD, ibcast_keyval, &attr);
MPI_Comm_dup( MPI_COMM_WORLD, &duped_comm);
MPI_Comm_free( &duped_comm );
MPI_Comm_delete_attr( MPI_COMM_WORLD, ibcast_keyval );
MPI_Finalize();
return 0;
}
Here we create the keyval with the callbacks, set the attribute corresponding to the key, dup MPI_COMM_WORLD (invoking the copy callback), and then free the dup'ed communicator and delete the attribute from COMM_WORLD (invoking the delete callback both times):
$ mpirun -np 1 ./comm-attr
In ibcast_copy: keyval = 10
In ibcast_delete: keyval = 10
In ibcast_delete: keyval = 10
In your code, you dup the communicator in Ibcast before setting the attribute, so the copy callback is never invoked (there is nothing to copy yet). You can fix that part by setting the attribute before the dup, but then there is another problem: you call dup and free within the callbacks, which is the wrong way around; those functions are the ones that (indirectly) invoke the callbacks, not vice versa.
I am working on a project that converts point-to-point communication to collective communication.
Essentially, what I would like to do is use MPI_Scatterv instead of MPI_Send and MPI_Recv. What I am having trouble determining is the correct arguments for Scatterv.
Here is the function that I am working in:
void read_block_vector (
char *s, /* IN - File name */
void **v, /* OUT - Subvector */
MPI_Datatype dtype, /* IN - Element type */
int *n, /* OUT - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int datum_size; /* Bytes per element */
int i;
FILE *infileptr; /* Input file pointer */
int local_els; /* Elements on this proc */
MPI_Status status; /* Result of receive */
int id; /* Process rank */
int p; /* Number of processes */
int x; /* Result of read */
datum_size = get_size (dtype);
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
/* Process p-1 opens file, determines number of vector
elements, and broadcasts this value to the other
processes. */
if (id == (p-1)) {
infileptr = fopen (s, "r");
if (infileptr == NULL) *n = 0;
else fread (n, sizeof(int), 1, infileptr);
}
MPI_Bcast (n, 1, MPI_INT, p-1, comm);
if (! *n) {
if (!id) {
printf ("Input file '%s' cannot be opened\n", s);
fflush (stdout);
}
}
/* Block mapping of vector elements to processes */
local_els = BLOCK_SIZE(id,p,*n);
/* Dynamically allocate vector. */
*v = my_malloc (id, local_els * datum_size);
if (id == (p-1)) {
for (i = 0; i < p-1; i++) {
x = fread (*v, datum_size, BLOCK_SIZE(i,p,*n),
infileptr);
MPI_Send (*v, BLOCK_SIZE(i,p,*n), dtype, i, DATA_MSG,
comm);
}
x = fread (*v, datum_size, BLOCK_SIZE(id,p,*n),
infileptr);
fclose (infileptr);
} else {
MPI_Recv (*v, BLOCK_SIZE(id,p,*n), dtype, p-1, DATA_MSG,
comm, &status);
}
// My Attempt at making this collective communication:
if(id == (p-1))
{
x = fread(*v,datum_size,*n,infileptr);
for(i = 0; i < p; i++)
{
size[i] = BLOCK_SIZE(i,p,*n);
}
//x = fread(*v,datum_size,BLOCK_SIZE(id, p, *n),infileptr);
fclose(infileptr);
}
MPI_Scatterv(v,send_count,send_disp, dtype, storage, size[id], dtype, p-1, comm);
}
Any help would be appreciated.
Thank you
It's easier for people to answer your question if you post a small, self-contained, reproducible example.
For the Scatterv, you need to provide the list of counts to send to each process, which appears to be your size[] array, and the displacements within the data to send out. The mechanics of Scatter vs Scatterv are described in some detail in this answer. Trying to infer what all your variables and un-supplied functions/macros do, the example below scatters a file out to the processes.
But also note that if you're doing this, it's not much harder to actually use MPI-IO to coordinate the file access directly, avoiding the need to have one process read all of the data in the first place. Code for that is also supplied.
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char **argv) {
int id, p;
int *block_size;
int datasize = 0;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
block_size = malloc(p * sizeof(int));
for (int i=0; i<p; i++) {
block_size[i] = i + 1;
datasize += block_size[i];
}
/* create file for reading */
if (id == p-1) {
char *data = malloc(datasize * sizeof(char));
for (int i=0; i<datasize; i++)
data[i] = 'a' + i;
FILE *f = fopen("data.dat","wb");
fwrite(data, sizeof(char), datasize, f);
fclose(f);
printf("Initial data: ");
for (int i=0; i<datasize; i++)
printf("%c", data[i]);
printf("\n");
free(data);
}
if (id == 0) printf("---Using MPI-Scatterv---\n");
/* using scatterv */
int local_els = block_size[id];
char *v = malloc ((local_els + 1) * sizeof(char));
char *all;
int *counts, *disps;
counts = malloc(p * sizeof(int));
disps = malloc(p * sizeof(int));
/* counts.. */
for(int i = 0; i < p; i++)
counts[i] = block_size[i];
/* and displacements (where the data starts within the send buffer) */
disps[0] = 0;
for(int i = 1; i < p; i++)
disps[i] = disps[i-1] + counts[i-1];
if(id == (p-1))
{
all = malloc(datasize*sizeof(char));
FILE *f = fopen("data.dat","rb");
int x = fread(all,sizeof(char),datasize,f);
fclose(f);
}
MPI_Scatterv(all, counts, disps, MPI_CHAR, v, local_els, MPI_CHAR, p-1, MPI_COMM_WORLD);
if (id == (p-1)) {
free(all);
}
v[local_els] = '\0';
printf("[%d]: %s\n", id, v);
/* using MPI I/O */
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD); /* only for syncing output to screen */
if (id == 0) printf("---Using MPI-IO---\n");
for (int i=0; i<local_els; i++)
v[i] = 'X';
/* create the file layout - the subarrays within the 1d array of data */
MPI_Datatype myview;
MPI_Type_create_subarray(1, &datasize, &local_els, &(disps[id]),
MPI_ORDER_C, MPI_CHAR, &myview);
MPI_Type_commit(&myview);
MPI_File mpif;
MPI_Status status;
MPI_File_open(MPI_COMM_WORLD, "data.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &mpif);
MPI_File_set_view(mpif, (MPI_Offset)0, MPI_CHAR, myview, "native", MPI_INFO_NULL);
MPI_File_read_all(mpif, v, local_els, MPI_CHAR, &status);
MPI_File_close(&mpif);
MPI_Type_free(&myview);
v[local_els] = '\0';
printf("[%d]: %s\n", id, v);
free(v);
free(counts);
free(disps);
MPI_Finalize();
return 0;
}
Running this gives (output re-ordered for clarity)
$ mpirun -np 6 ./foo
Initial data: abcdefghijklmnopqrstu
---Using MPI-Scatterv---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu
---Using MPI-IO---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu