I am working on a project converting point-to-point communication to collective communication.
Essentially, what I would like to do is use MPI_Scatterv instead of MPI_Send and MPI_Recv. What I am having trouble determining is the correct arguments for Scatterv.
Here is the function that I am working in:
void read_block_vector (
char *s, /* IN - File name */
void **v, /* OUT - Subvector */
MPI_Datatype dtype, /* IN - Element type */
int *n, /* OUT - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int datum_size; /* Bytes per element */
int i;
FILE *infileptr; /* Input file pointer */
int local_els; /* Elements on this proc */
MPI_Status status; /* Result of receive */
int id; /* Process rank */
int p; /* Number of processes */
int x; /* Result of read */
datum_size = get_size (dtype);
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
/* Process p-1 opens file, determines number of vector
elements, and broadcasts this value to the other
processes. */
if (id == (p-1)) {
infileptr = fopen (s, "r");
if (infileptr == NULL) *n = 0;
else fread (n, sizeof(int), 1, infileptr);
}
MPI_Bcast (n, 1, MPI_INT, p-1, comm);
if (! *n) {
if (!id) {
printf ("Input file '%s' cannot be opened\n", s);
fflush (stdout);
}
}
/* Block mapping of vector elements to processes */
local_els = BLOCK_SIZE(id,p,*n);
/* Dynamically allocate vector. */
*v = my_malloc (id, local_els * datum_size);
if (id == (p-1)) {
for (i = 0; i < p-1; i++) {
x = fread (*v, datum_size, BLOCK_SIZE(i,p,*n),
infileptr);
MPI_Send (*v, BLOCK_SIZE(i,p,*n), dtype, i, DATA_MSG,
comm);
}
x = fread (*v, datum_size, BLOCK_SIZE(id,p,*n),
infileptr);
fclose (infileptr);
} else {
MPI_Recv (*v, BLOCK_SIZE(id,p,*n), dtype, p-1, DATA_MSG,
comm, &status);
}
// My Attempt at making this collective communication:
if(id == (p-1))
{
x = fread(*v,datum_size,*n,infileptr);
for(i = 0; i < p; i++)
{
size[i] = BLOCK_SIZE(i,p,*n);
}
//x = fread(*v,datum_size,BLOCK_SIZE(id, p, *n),infileptr);
fclose(infileptr);
}
MPI_Scatterv(v,send_count,send_disp, dtype, storage, size[id], dtype, p-1, comm);
}
Any help would be appreciated.
Thank you
It's easier for people to answer your question if you post a small, self-contained, reproducible example.
For the Scatterv, you need to provide the list of counts to send to each process (which appears to be your size[] array) and the displacements of each block within the data being sent out. The mechanics of Scatter vs Scatterv are described in some detail in this answer. Inferring what your variables and unsupplied functions/macros do, the example below scatters a file out to the processes.
But also note that if you're doing this, it's not much harder to actually use MPI-IO to coordinate the file access directly, avoiding the need to have one process read all of the data in the first place. Code for that is also supplied.
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char **argv) {
int id, p;
int *block_size;
int datasize = 0;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
block_size = malloc(p * sizeof(int));
for (int i=0; i<p; i++) {
block_size[i] = i + 1;
datasize += block_size[i];
}
/* create file for reading */
if (id == p-1) {
char *data = malloc(datasize * sizeof(char));
for (int i=0; i<datasize; i++)
data[i] = 'a' + i;
FILE *f = fopen("data.dat","wb");
fwrite(data, sizeof(char), datasize, f);
fclose(f);
printf("Initial data: ");
for (int i=0; i<datasize; i++)
printf("%c", data[i]);
printf("\n");
free(data);
}
if (id == 0) printf("---Using MPI-Scatterv---\n");
/* using scatterv */
int local_els = block_size[id];
char *v = malloc ((local_els + 1) * sizeof(char));
char *all = NULL;
int *counts, *disps;
counts = malloc(p * sizeof(int));
disps = malloc(p * sizeof(int));
/* counts.. */
for(int i = 0; i < p; i++)
counts[i] = block_size[i];
/* and displacements (where the data starts within the send buffer) */
disps[0] = 0;
for(int i = 1; i < p; i++)
disps[i] = disps[i-1] + counts[i-1];
if(id == (p-1))
{
all = malloc(datasize*sizeof(char));
FILE *f = fopen("data.dat","rb");
int x = fread(all,sizeof(char),datasize,f);
fclose(f);
}
MPI_Scatterv(all, counts, disps, MPI_CHAR, v, local_els, MPI_CHAR, p-1, MPI_COMM_WORLD);
if (id == (p-1)) {
free(all);
}
v[local_els] = '\0';
printf("[%d]: %s\n", id, v);
/* using MPI I/O */
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD); /* only for syncing output to screen */
if (id == 0) printf("---Using MPI-IO---\n");
for (int i=0; i<local_els; i++)
v[i] = 'X';
/* create the file layout - the subarrays within the 1d array of data */
MPI_Datatype myview;
MPI_Type_create_subarray(1, &datasize, &local_els, &(disps[id]),
MPI_ORDER_C, MPI_CHAR, &myview);
MPI_Type_commit(&myview);
MPI_File mpif;
MPI_Status status;
MPI_File_open(MPI_COMM_WORLD, "data.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &mpif);
MPI_File_set_view(mpif, (MPI_Offset)0, MPI_CHAR, myview, "native", MPI_INFO_NULL);
MPI_File_read_all(mpif, v, local_els, MPI_CHAR, &status);
MPI_File_close(&mpif);
MPI_Type_free(&myview);
v[local_els] = '\0';
printf("[%d]: %s\n", id, v);
free(v);
free(counts);
free(disps);
MPI_Finalize();
return 0;
}
Running this gives (output re-ordered for clarity)
$ mpirun -np 6 ./foo
Initial data: abcdefghijklmnopqrstu
---Using MPI-Scatterv---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu
---Using MPI-IO---
[0]: a
[1]: bc
[2]: def
[3]: ghij
[4]: klmno
[5]: pqrstu
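Mapping this back onto the original read_block_vector, a rough sketch might look like the following. It assumes the BLOCK_SIZE macro and my_malloc from the question behave as in the original, keeps p-1 as the reading rank, and omits freeing the temporaries; note that the counts and displacements are in elements of dtype, and that the send buffer only has to be valid on the root:
int *counts = my_malloc (id, p * sizeof(int));
int *disps  = my_malloc (id, p * sizeof(int));
void *all = NULL;
for (i = 0; i < p; i++)
    counts[i] = BLOCK_SIZE(i,p,*n);
disps[0] = 0;
for (i = 1; i < p; i++)
    disps[i] = disps[i-1] + counts[i-1];
if (id == (p-1)) {
    /* read the whole vector once on the root */
    all = my_malloc (id, *n * datum_size);
    x = fread (all, datum_size, *n, infileptr);
    fclose (infileptr);
}
/* every rank receives its own block straight into *v */
MPI_Scatterv (all, counts, disps, dtype, *v, local_els, dtype, p-1, comm);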
Related
I want to send a struct that contains some variables and a dynamically allocated array.
I read that it's not possible to send it all at once because of the dynamic array; I should first send a message with all the other variables and then another message with the dynamic array.
Because of that, I thought I could directly send a copy of the contents of the struct in a dynamic array of BYTE (look at the code for more details).
I wrote the following code.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
// I read MPI_BYTE is an unsigned char
typedef unsigned char BYTE;
typedef struct Message
{
int id;
int detectNr;
char *detection;
} Msg;
int main() {
int size, rank;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
printf("Invalid number of processes.\n");
return -1;
}
// initialize an empty message
Msg msg = {.id = 1, .detectNr = 0, .detection = NULL};
// here we should take the measurements
// for now suppose there are 10
msg.detectNr = 10;
msg.detection = malloc(sizeof(char) * msg.detectNr);
for (int i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
if(rank != 0) {
// put the data inside a buffer of BYTE
int bufferSize = sizeof(int) + sizeof(int) + sizeof(char) * msg.detectNr;
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg.id, sizeof(int));
memcpy(buffer + sizeof(int), &msg.detectNr, sizeof(int));
memcpy(buffer + (2 * sizeof(int)), &msg.detection, sizeof(char) * msg.detectNr);
// send buffer to process 0
MPI_Send(buffer, bufferSize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
free(buffer);
free(msg.detection);
} else {
for (int i = 1; i < size; i++) {
int bufferSize;
BYTE *buffer;
MPI_Status status;
// initialize an empty message
Msg rcv= {.id = 0, .detectNr = 0, .detection = NULL};
// probe for an incoming message from process zero
MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
// when probe returns, the status object has the size and other
// attributes of the incoming message
// get the message size
MPI_Get_count(&status, MPI_BYTE, &bufferSize);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// allocate a buffer to hold the incoming data
buffer = malloc(sizeof(BYTE) * bufferSize);
// now receive the message with the allocated buffer
MPI_Recv(buffer, bufferSize, MPI_BYTE, i, 0, MPI_COMM_WORLD, &status);
// copy the data from the buffer to the message
memcpy(&rcv.id, buffer, sizeof(int));
memcpy(&rcv.detectNr, buffer + sizeof(int), sizeof(int));
memcpy(&rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);
printf("Process %d: id: %d\n", rank, rcv.id);
printf("Process %d: detectNr: %d\n", rank, rcv.detectNr);
printf("Process %d: detection: %s\n", rank, rcv.detection);
free(rcv.detection);
free(buffer);
}
}
MPI_Finalize();
return 0;
}
Unfortunately it doesn't work; the result is below.
Process 0: buffer size: 18
Process 1: id: 1
Process 1: detectNr: 10
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
It looks like, and probably is the case, that the last part of the buffer used for receiving is empty.
I don't understand why this happens: I'm not trying to send the original detection array; instead I'm allocating an entirely new dynamic array and copying all the values into it.
Can you help me solve the problem, or explain why it doesn't work?
I don't know if this helps: I'm developing in C, under Ubuntu, with VSCode as editor and gcc as compiler.
OK, basically the problem wasn't MPI but the memory allocation, so it was my fault.
Here is the corrected code:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <mpi.h>
// I read MPI_BYTE is an unsigned char
typedef unsigned char BYTE;
typedef struct Message
{
int id;
int detectNr;
char *detection;
} Msg;
int main() {
int i, size, rank;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
printf("Invalid number of processes.\n");
return -1;
}
if(rank != 0) {
// initialize an empty message
Msg msg = {.id = 1, .detectNr = 0, .detection = NULL};
// here we should take the measurements
// for now suppose there are 10
msg.detectNr = 10;
msg.detection = malloc(sizeof(char) * msg.detectNr);
for (i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
printf("Process %d: id: %d\n", rank, msg.id);
printf("Process %d: detectNr: %d\n", rank, msg.detectNr);
for (i = 0; i < msg.detectNr; i++)
printf("Process %d: msg.detect[%d]: %c\n", rank, i, msg.detection[i]);
for (int i = 0; i < msg.detectNr; i++)
msg.detection[i] = 'a' + i;
printf("Process %d: i: %d\n", rank, i);
// put the data inside a buffer of BYTE
int bufferSize = sizeof(int) + sizeof(int) + sizeof(char) * msg.detectNr;
BYTE *buffer = malloc(sizeof(BYTE) * bufferSize);
memcpy(buffer, &msg.id, sizeof(int));
memcpy(buffer + sizeof(int), &msg.detectNr, sizeof(int));
memcpy(buffer + (2 * sizeof(int)), msg.detection, sizeof(char) * msg.detectNr);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// send buffer to process 0
MPI_Send(buffer, bufferSize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
free(buffer);
free(msg.detection);
} else {
for (int i = 1; i < size; i++) {
int bufferSize;
BYTE *buffer;
MPI_Status status;
// initialize an empty message
Msg rcv = {.id = 0, .detectNr = 0, .detection = NULL};
// probe for an incoming message from process zero
MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
// when probe returns, the status object has the size and other
// attributes of the incoming message
// get the message size
MPI_Get_count(&status, MPI_BYTE, &bufferSize);
printf("\nProcess %d: buffer size: %d\n", rank, bufferSize);
// allocate a buffer to hold the incoming data
buffer = malloc(sizeof(BYTE) * bufferSize);
// now receive the message with the allocated buffer
MPI_Recv(buffer, bufferSize, MPI_BYTE, i, 0, MPI_COMM_WORLD, &status);
// copy the data from the buffer to the message
memcpy(&rcv.id, buffer, sizeof(int));
memcpy(&rcv.detectNr, buffer + sizeof(int), sizeof(int));
rcv.detection = malloc(sizeof(char) * rcv.detectNr);
memcpy(rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);
printf("Process %d: id: %d\n", rank, rcv.id);
printf("Process %d: detectNr: %d\n", rank, rcv.detectNr);
int j; /* separate index so the outer loop over senders is not clobbered */
for (j = 0; j < rcv.detectNr; j++)
printf("Process %d: rcv.detect[%d]: %c\n", rank, j, rcv.detection[j]);
printf("Process %d: j: %d\n", rank, j);
free(rcv.detection);
free(buffer);
}
}
MPI_Finalize();
return 0;
}
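For anyone comparing the two versions, the decisive changes are these two spots, shown here in isolation: the sender has to copy the pointed-to characters rather than the pointer itself, and the receiver has to allocate storage before copying the payload into it.
/* sender: copy the array contents, not the address of the pointer */
memcpy(buffer + (2 * sizeof(int)), msg.detection, sizeof(char) * msg.detectNr);
/* receiver: allocate before unpacking the payload */
rcv.detection = malloc(sizeof(char) * rcv.detectNr);
memcpy(rcv.detection, buffer + (2 * sizeof(int)), sizeof(char) * rcv.detectNr);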
In my C program I have a structure like the one below
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
typedef struct Message
{
int elemNr;
char *elem;
} Msg;
I think I followed all the steps to create a custom data type in MPI.
int main(int argc, char **argv) {
int size, rank;
int i;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (size < 2) {
fprintf(stderr,"Requires at least two processes.\n");
exit(-1);
}
// just for simplicity
const int n = 5;
// create a new type for struct message
MPI_Datatype myType;
Msg msgSnd;
int block_length[2] = {1, n};
MPI_Aint elemNrAddr, elemAddr;
MPI_Aint displacement[2] = {0, 0};
MPI_Get_address(&msgSnd.elemNr, &elemNrAddr);
MPI_Get_address(&msgSnd.elem, &elemAddr);
// just displacement[1] because displacement[0] starts from 0
displacement[1] = elemAddr - elemNrAddr;
MPI_Datatype types[2] = {MPI_INT, MPI_CHAR};
MPI_Type_create_struct(2, block_length, displacement, types, &myType);
MPI_Type_commit(&myType);
// populate the message
msgSnd.elemNr = n;
msgSnd.elem = malloc(sizeof(char) * msgSnd.elemNr);
srand(time(NULL));
for (i = 0; i < msgSnd.elemNr; i++)
msgSnd.elem[i] = rand() % 26 + 'a';
if (rank != 0) {
printf("I'm sending\n");
MPI_Send(&msgSnd, 1, myType, 0, 0, MPI_COMM_WORLD);
printf("I sent\n");
} else {
MPI_Status status;
Msg msgRcv;
printf("I'm receiving\n");
MPI_Recv(&msgRcv, 1, myType, 1, 0, MPI_COMM_WORLD, &status);
printf("I received\n");
for (i = 0; i < msgRcv.elemNr; i++)
printf("element %d: %c\n", i, msgRcv.elem[i]);
if (msgRcv.elem != NULL)
free(msgRcv.elem);
}
if (msgSnd.elem != NULL)
free(msgSnd.elem);
MPI_Type_free(&myType);
MPI_Finalize();
return 0;
}
I ran the above code, but unfortunately when process 0 receives the message, the elem pointer is null and the program ends with a segmentation fault.
Can you help me find the problem? Or, how can I send a dynamic array inside a struct with MPI?
I'm programming with the MPI library in C at the moment and I have the following snippet of code that behaves very strangely.
This is not a minimal reproducible example, but I think there is an obvious problem with the code snippet, possibly unrelated to MPI, which can be solved without reproducing it. Do let me know if additional code is needed and I'll happily provide it!
void monitor_proposals(int people_per_gender) {
int satisfied_women = 0;
/* declarations independent of the one above (omitted) */
while (satisfied_women < people_per_gender) {
MPI_Recv(&buf, sizeof(buf), MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
sender = status.MPI_SOURCE;
index = sender/2;
printf("here\n");
printf("ppg=%d, sw=%d\n", people_per_gender, satisfied_women);
fflush(stdout);
if (women_atleast_one_proposal[index] == 0) {
women_atleast_one_proposal[index] = sender+1; /* logical id */
satisfied_women += 1;
printf("Monitor: First proposal to woman (%d)\n", sender+1);
printf("ppg=%d, sw=%d\n", people_per_gender, satisfied_women);
}
if (satisfied_women == people_per_gender) {
MPI_Send(&DONE, sizeof(DONE), MPI_INT, sender, sender, MPI_COMM_WORLD);
printf("this\n");
} else {
MPI_Send(&NOT_DONE, sizeof(NOT_DONE), MPI_INT, sender, sender, MPI_COMM_WORLD);
printf("that\n");
}
}
printf("outside\n");
}
Output in terminal:
here
ppg=1, sw=16
Monitor: First proposal to woman (1)
ppg=1, sw=17
that
My expectation is of course that satisfied_women is initialized to 0, then incremented to 1, and therefore the loop will exit on the next iteration. I also flush stdout, which should show me if there is uncontrolled looping, but there doesn't seem to be.
Expected output:
here
ppg=1, sw=0
Monitor: First proposal to woman (1)
ppg=1, sw=1
this
outside
I'm using mpich: stable 4.0.1 via homebrew.
EDIT
I solved the increment problem and this is the code right now (I changed the count argument to 1 in several places, so that part works now).
There are n man processes and n woman processes. Men and women rate each other. Men propose to women by sending, and women wait for proposals. If a woman receives a proposal from a man rated better than the currently accepted one, the previous man has to propose to another woman.
There is a monitoring process that is contacted once every iteration of the women's while-loop, and it feeds back to the sending woman whether she should exit or not. When a woman exits the while-loop, she notifies the man she most recently accepted.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
typedef enum gender {
MAN,
WOMAN
} gender_t;
/* men use array of women and fill in each womens rating_t, and vice versa */
typedef struct rating {
gender_t gender;
int id;
int rating;
} rating_t;
/*******************************************************************************************
* HELPER FUNCTIONS ************************************************************************
*******************************************************************************************/
/* custom compare for qsort */
int compare(const void *r1, const void *r2) {
return ((rating_t*)r1)->rating > ((rating_t*)r2)->rating ? -1 : 1;
}
/* random shuffling of ratings */
void shuffle_ratings(rating_t *profiles, int size) {
int random_index, temp;
for (int max_index = size-1; max_index > 0; max_index--) {
random_index = rand() % (max_index+1);
/* swap values at indexes */
temp = profiles[max_index].rating;
profiles[max_index].rating = profiles[random_index].rating;
profiles[random_index].rating = temp;
}
}
/*******************************************************************************************
* PROCESSES *******************************************************************************
*******************************************************************************************/
/* keeps track of women who are with a man, eventually notifies them it's done */
void monitor_proposals(int people_per_gender) {
MPI_Status status;
const int DONE = 1;
const int NOT_DONE = 0;
int *women_atleast_one_proposal = (int*)calloc(people_per_gender, sizeof(int));
int satisfied_women = 0;
int sender, index;
int buf; /* not useful */
while (satisfied_women < people_per_gender) {
MPI_Recv(&buf, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
sender = status.MPI_SOURCE;
index = sender/2;
fflush(stdout);
if (women_atleast_one_proposal[index] == 0) {
women_atleast_one_proposal[index] = sender+1; /* logical id */
satisfied_women++;
printf("Monitor: First proposal to woman (%d)\n", sender+1);
printf("ppg=%d, sw=%d\n", people_per_gender, satisfied_women);
}
if (satisfied_women == people_per_gender) {
MPI_Send(&DONE, 1, MPI_INT, sender, sender, MPI_COMM_WORLD);
printf("this\n");
} else {
MPI_Send(&NOT_DONE, 1, MPI_INT, sender, sender, MPI_COMM_WORLD);
printf("that\n");
}
}
}
/* function for men, highest rating is proposed to first */
void propose(int id, rating_t *my_ratings) {
MPI_Status rec_status;
int proposals = 0;
int accepted = 0;
int propose_dest, propose_rating;
while (!accepted) {
propose_dest = my_ratings[proposals].id - 1;
propose_rating = my_ratings[proposals].rating;
printf("Man (%d): Proposed to woman (%d) who's rated %d\n", id, propose_dest+1, propose_rating);
fflush(stdout);
MPI_Send(&propose_rating, 1, MPI_INT, propose_dest, propose_dest, MPI_COMM_WORLD);
proposals++;
MPI_Recv(&accepted, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &rec_status);
}
printf("man %d accepted\n", id);
}
/* function for women, accepts first proposal but can replace */
void receive_proposals(int id, rating_t *my_ratings, int monitor_rank) {
MPI_Status status;
const int ACCEPT = 1;
const int REJECT = 0;
int DONT_CARE = 0;
int monitor_response;
int from_man;
int received_man_rank = -1;
int received_man_rating = -1;
int best_man_rank = -1;
int best_man_rating = -1;
while (1) {
MPI_Recv(&from_man, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
received_man_rank = status.MPI_SOURCE;
received_man_rating = my_ratings[received_man_rank/2].rating;
if (best_man_rank == -1) { /* first proposal received */
best_man_rank = received_man_rank;
best_man_rating = received_man_rating;
printf("Woman (%d): Accepted man (%d) #%d#\n", id, best_man_rank+1, best_man_rating);
} else if (received_man_rating > best_man_rating) { /* proposal is better rated than current accepted, notify replaced */
MPI_Send(&REJECT, 1, MPI_INT, best_man_rank, best_man_rank, MPI_COMM_WORLD);
printf("Woman (%d): Replaced man (%d) #%d# for man (%d) #%d#\n", id, best_man_rank+1, \
best_man_rating, received_man_rank+1, received_man_rating);
best_man_rank = received_man_rank;
best_man_rating = received_man_rating;
} else { /* notify denied man */
MPI_Send(&REJECT, 1, MPI_INT, received_man_rank, received_man_rank, MPI_COMM_WORLD);
printf("Woman (%d): Rejected proposing man (%d) #%d# due to best man (%d) #%d#\n", id, received_man_rank+1, \
received_man_rating, best_man_rank+1, best_man_rating);
}
MPI_Send(&DONT_CARE, 1, MPI_INT, monitor_rank, monitor_rank, MPI_COMM_WORLD);
MPI_Recv(&monitor_response, 1, MPI_INT, monitor_rank, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
if (monitor_response) {
printf("woman here\n");
break;
}
}
/* send ok to accepted man */
MPI_Send(&ACCEPT, 1, MPI_INT, best_man_rank, best_man_rank, MPI_COMM_WORLD);
printf("Woman (%d) + Man (%d) MARRIED\n", id, best_man_rank+1);
}
int main(int argc, char *argv[]) {
int pool_size, people_per_gender;
int rank, id, monitor_rank;
rating_t *my_ratings;
gender_t gender;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &pool_size);
if (pool_size % 2 != 1) {
if (rank == 0)
printf("Requirement: men == women and 1 extra process!\n");
MPI_Finalize();
exit(1);
}
people_per_gender = pool_size / 2; /* number of men/women */
id = rank + 1; /* logical id */
monitor_rank = pool_size - 1; /* collecting of proposals */
if (rank != monitor_rank) {
gender = (id % 2 == 0 ? MAN : WOMAN); /* odd id - woman, even id - man */
my_ratings = (rating_t*)malloc(people_per_gender * sizeof(rating_t)); /* rate half of pool, i.e. other gender */
/* create "profiles" of other gender */
for (int i = 0; i < people_per_gender; i++) {
my_ratings[i].gender = (gender == MAN ? WOMAN : MAN);
my_ratings[i].id = ( gender == MAN ? (2*i+1) : (2*i+2) );
my_ratings[i].rating = i+1;
}
/* randomize ratings of other gender */
srand(time(NULL) + id);
shuffle_ratings(my_ratings, people_per_gender);
qsort(my_ratings, people_per_gender, sizeof(rating_t), compare);
if (gender == WOMAN) printf("W(%d) ratings: ", id);
else if (gender == MAN) printf("M(%d) ratings: ", id);
for (int i = 0; i < people_per_gender; i++)
printf("| {id:%d, %d} | ", my_ratings[i].id, my_ratings[i].rating);
printf("\n");
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == monitor_rank) printf("\n");
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);
/* call function based on process type */
if (rank == monitor_rank) {
monitor_proposals(people_per_gender);
} else {
if (gender == WOMAN)
receive_proposals(id, my_ratings, monitor_rank);
else if (gender == MAN)
propose(id, my_ratings);
}
MPI_Barrier(MPI_COMM_WORLD);
printf("ID (%d): Done\n", id);
MPI_Finalize();
return 0;
}
You're not giving us enough code. How is the buffer defined in:
MPI_Recv(&buf, sizeof(buf), MPI_INT,
If it's int buf, then this is almost correct, because sizeof will give 4.
If it's int buf[20], then sizeof will give the size in bytes, not in ints.
If it's buf = (int*)malloc(whatever), then sizeof will give 8: the size of the pointer.
In other words, it's definitely wrong, but precisely how, we cannot tell.
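To make that concrete, a minimal sketch of how the count would be supplied in each of those cases (the count is a number of elements of the given datatype, never a byte count; status and n stand in for whatever you have in scope):
int buf;                              /* single int: count is 1 */
MPI_Recv(&buf, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

int arr[20];                          /* fixed array: count is the element count, not sizeof(arr) */
MPI_Recv(arr, 20, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

int *dyn = malloc(n * sizeof(int));   /* heap buffer: count is n, never sizeof(dyn) */
MPI_Recv(dyn, n, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);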
First, I should say that I am French and my English is not very good.
I am working on an MPI application, I have some problems, and I hope that somebody can help me.
As reported in the title of my post, I am trying to use a thread to listen for when I have to kill my application and then call the MPI_Finalize function.
However, my application does not finish correctly.
More precisely, I obtain the following message:
[XPS-2720:27441] *** Process received signal ***
[XPS-2720:27441] Signal: Segmentation fault (11)
[XPS-2720:27441] Signal code: Address not mapped (1)
[XPS-2720:27441] Failing at address: 0x7f14077a3b6d
[XPS-2720:27440] *** Process received signal ***
[XPS-2720:27440] Signal: Segmentation fault (11)
[XPS-2720:27440] Signal code: Address not mapped (1)
[XPS-2720:27440] Failing at address: 0x7fb11d07bb6d
mpirun noticed that process rank 1 with PID 27440 on node lagniez-XPS-2720 exited on signal 11 (Segmentation fault).
My slave code is:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <pthread.h>
#include <cassert>
#define send_data_tag 1664
#define send_kill_tag 666
void *finilizeMPICom(void *intercomm)
{
printf("the finilizeMPICom was called\n");
MPI_Comm parentcomm = * ((MPI_Comm *) intercomm);
MPI_Status status;
int res;
// sleep(10);
MPI_Recv(&res, 1, MPI_INT, 0, send_kill_tag, parentcomm, &status);
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("we receive something %d -- %d\n", rank, res);
MPI_Finalize();
exit(0);
}// finilizeMPICom
int main( int argc, char *argv[])
{
int numtasks, rank, len, rc;
char hostname[MPI_MAX_PROCESSOR_NAME];
int provided, claimed;
rc = MPI_Init_thread(0, 0, MPI_THREAD_MULTIPLE, &provided);
MPI_Query_thread( &claimed );
if (rc != MPI_SUCCESS || provided != 3)
{
printf ("Error starting MPI program. Terminating.\n");
MPI_Abort(MPI_COMM_WORLD, rc);
}
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm parentcomm;
MPI_Comm_get_parent(&parentcomm);
/* create a second thread to listen when we have to kill the program */
pthread_t properlyKill;
if(pthread_create(&properlyKill, NULL, finilizeMPICom, (void *) &parentcomm))
{
fprintf(stderr, "Error creating thread\n");
return 0;
}
assert(parentcomm != MPI_COMM_NULL);
MPI_Status status;
int root_process, ierr, num_rows_to_receive;
int mode;
MPI_Recv( &mode, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
printf("c The solver works in the mode %d\n", mode);
printf("I sent a message %d\n", rank);
// if(rank != 1) sleep(100);
int res = 1;
MPI_Send(&res, 1, MPI_INT, 0, send_data_tag, parentcomm);
printf("we want to listen for somethiing %d\n", rank);
int rescc = 1;
MPI_Recv(&rescc, 1, MPI_INT, 0, send_data_tag, parentcomm, &status);
printf("I received the message %d %d\n", rescc, rank);
if(rescc == 1000)
{
printf("~~~~~~~~>>> I print the solution %d\n", rank);
int res3 = 1001;
MPI_Send(&res3, 1, MPI_INT, 0, send_data_tag, parentcomm);
}
else printf("I do not understand %d\n", rank);
printf("I wait the thread to kill the programm %d\n", rank);
pthread_join(properlyKill, (void**)&(res));
return 0;
}
For the master I have:
int main(int argc, char **argv)
{
Parser *p = new Parser("slave.xml");
MPI_Init(&argc, &argv);
if(p->method == "concurrent")
{
ConcurrentManager cc(p->instance, p->solvers);
cc.run();
}
else
{
cerr << "c The only available methods are: concurrent, eps (Embarrassingly Parallel Search) or tree" << endl;
exit(1);
}
delete(p);
MPI_Finalize();
exit(0);
}// main
/**
Create a concurrent manager (means init the data structures to run
the solvers).
#param[in] _instance, the benchmark path
#param[in] _solvers, the set of solvers that will be ran
*/
ConcurrentManager::ConcurrentManager(string _instance, vector<Solver> &_solvers) :
instance(_instance), solvers(_solvers)
{
cout << "c\nc Concurrent manager called" << endl;
nbSolvers = _solvers.size();
np = new int[nbSolvers];
cmds = new char*[nbSolvers];
arrayOfArgs = new char **[nbSolvers];
infos = new MPI_Info[nbSolvers];
for(int i = 0 ; i<nbSolvers ; i++)
{
np[i] = solvers[i].npernode;
cmds[i] = new char[(solvers[i].executablePath).size() + 1];
strcpy(cmds[i], (solvers[i].executablePath).c_str());
arrayOfArgs[i] = new char *[(solvers[i].options).size() + 1];
for(unsigned int j = 0 ; j<(solvers[i].options).size() ; j++)
{
arrayOfArgs[i][j] = new char[(solvers[i].options[j]).size() + 1];
strcpy(arrayOfArgs[i][j], (solvers[i].options[j]).c_str());
}
arrayOfArgs[i][(solvers[i].options).size()] = NULL;
MPI_Info_create(&infos[i]);
char hostname[solvers[i].hostname.size()];
strcpy(hostname, solvers[i].hostname.c_str());
MPI_Info_set(infos[i], "host", hostname);
}
sizeComm = 0;
}// constructor
/**
Wait that at least one process finish and return the code
SOLUTION_FOUND.
#param[in] intercomm, the communicator
*/
void ConcurrentManager::waitForSolution(MPI_Comm &intercomm)
{
MPI_Status arrayStatus[sizeComm], status;
MPI_Request request[sizeComm];
int val[sizeComm], flag;
for(int i = 0 ; i<sizeComm ; i++) MPI_Irecv(&val[i], 1, MPI_INT, i, TAG_MSG, intercomm, &request[i]);
bool solutionFound = false;
while(!solutionFound)
{
for(int i = 0 ; i<sizeComm ; i++)
{
MPI_Test(&request[i], &flag, &arrayStatus[i]);
if(flag)
{
printf("---------------------> %d reveived %d\n", i , val[i]);
if(val[i] == SOLUTION_FOUND)
{
int msg = PRINT_SOLUTION;
MPI_Send(&msg, 1, MPI_INT, i, TAG_MSG, intercomm); // ask to print the solution
int msgJobFinished;
MPI_Recv(&msgJobFinished, 1, MPI_INT, i, TAG_MSG, intercomm, &status); // wait the answer
assert(msgJobFinished == JOB_FINISHED);
cout << "I am going to kill everybody" << endl;
int msgKill[sizeComm];
for(int j = 0 ; j<sizeComm ; j++)
{
msgKill[i] = STOP_AT_ONCE;
MPI_Send(&msgKill[i], 1, MPI_INT, j, TAG_KILL, intercomm);
}
solutionFound = true;
break;
} else
{
printf("restart the communication for %d\n", i);
MPI_Irecv(&val[i], 1, MPI_INT, i, TAG_MSG, intercomm, &request[i]);
}
}
}
}
}// waitForSolution
/**
Run the solver.
*/
void ConcurrentManager::run()
{
MPI_Comm intercomm;
int errcodes[solvers.size()];
MPI_Comm_spawn_multiple(nbSolvers, cmds, arrayOfArgs, np, infos, 0, MPI_COMM_WORLD, &intercomm, errcodes);
MPI_Comm_remote_size(intercomm, &sizeComm);
cout << "c Solvers are now running: " << sizeComm << endl;
int msg = CONCU_MODE;
for(int i = 0 ; i<sizeComm ; i++) MPI_Send(&msg, 1, MPI_INT, i, TAG_MSG, intercomm); // init the working mode
waitForSolution(intercomm);
}// run
I know that I put a lot of code :(
But I do not know where the problem is.
Please, help me :)
Best regards.
The MPI documentation for how MPI interacts with threads demands that the call to MPI_Finalize() be performed by the main thread -- that is, the same one that initialized MPI. In your case, that happens also to be your process's initial thread.
In order to satisfy MPI's requirements, you could reorganize your application so that the initial thread is the one that waits for a kill signal and then shuts down MPI. The other work it currently does would then need to be moved to a different thread.
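A minimal sketch of that reorganization, under the same parentcomm/tag setup as the slave code above (the do_solver_work name is made up here): the initial thread keeps the wait on send_kill_tag and the call to MPI_Finalize, and the data exchanges move into the spawned thread.
void *do_solver_work(void *arg)
{
    MPI_Comm parentcomm = *((MPI_Comm *) arg);
    /* ... the MPI_Recv/MPI_Send exchanges on send_data_tag go here ... */
    return NULL;
}

int main(int argc, char *argv[])
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

    MPI_Comm parentcomm;
    MPI_Comm_get_parent(&parentcomm);

    pthread_t worker;
    pthread_create(&worker, NULL, do_solver_work, (void *) &parentcomm);

    /* the initial thread (the one that initialized MPI) waits for the kill message */
    int res;
    MPI_Status status;
    MPI_Recv(&res, 1, MPI_INT, 0, send_kill_tag, parentcomm, &status);

    /* stop or join the worker as appropriate, then finalize from this same thread */
    pthread_join(worker, NULL);
    MPI_Finalize();
    return 0;
}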
I am trying very hard to get my code to run using MPI. I am trying to achieve matrix multiplication.
My code is like this
There are two matrices A and B
Scatter the rows of A
Broadcast matrix B
Compute
Gather
I have written the code, but it is not running right; I am getting a segmentation fault.
I have no idea why this is happening. I have tried tweaking the code a lot, but it seems something is always wrong.
Could someone go over this code and tell me why it is not working?
I have added comments such as
"Scattering matrices"
"Gathering answers" and so on, so even if you could just go through the scatter part of the program and tell me why it is not right, I'll be thankful!
#define N 512
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "mpi.h"
void print_results(char *prompt, float result[N][N]);
int main(int argc, char *argv[])
{
int i, j, k;
MPI_Status status;
int process_rank;//rank of a process
int no_of_processes;//no. of processes
int Master_To_Slave = 0;
int Slave_To_Master = 5;
float a[N][N], b[N][N], c[N][N];
char *usage = "Usage: %s file\n";
FILE *fd;
double elapsed_time, start_time, end_time;
struct timeval tv1, tv2;
if (argc < 2) {
fprintf (stderr, usage, argv[0]);
return -1;
}
if ((fd = fopen (argv[1], "r")) == NULL) {
fprintf (stderr, "%s: Cannot open file %s for reading.\n",
argv[0], argv[1]);
fprintf (stderr, usage, argv[0]);
return -1;
}
// Read input from file for matrices a and b.
// The I/O is not timed because this I/O needs
// to be done regardless of whether this program
// is run sequentially on one processor or in
// parallel on many processors. Therefore, it is
// irrelevant when considering speedup.
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
fscanf (fd, "%f", &a[i][j]);
}
}
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
fscanf (fd, "%f", &b[i][j]);
int num_of_rows_A = N;
int num_of_cols_A = N;
int num_of_rows_B = N;
int num_of_cols_B = N;
int lower_index_of_A;
int upper_index_of_A;
//TODO: Add a barrier prior to the time stamp.
MPI_Init(&argc, &argv); //initialize MPI operations
MPI_Barrier(MPI_COMM_WORLD); //Added Barrier
// Take a time stamp
gettimeofday(&tv1, NULL);
//TODO: Scatter the input matrices a and b.
MPI_Comm_rank(MPI_COMM_WORLD, &process_rank); //get the rank
MPI_Comm_size(MPI_COMM_WORLD, &no_of_processes); //get number of processes
if(process_rank==0){
fprintf (stderr, "Main process started");
fprintf (stderr, "No. of process %d",no_of_processes);
fprintf (stderr, "\n\n");
fprintf (stderr, "\n\n");
for( i=1; i<no_of_processes;i++)
{
int rows_per_process = num_of_rows_A/(no_of_processes-1);
lower_index_of_A = (i-1)*rows_per_process;
// fprintf (stderr, "Current lower Index of A %s",lower_index_of_A);
if(i+1==no_of_processes &&((num_of_rows_A%(no_of_processes-1))!=0))
{
upper_index_of_A=num_of_rows_A;
// fprintf (stderr, "Current upper Index of A %s",upper_index_of_A);
}
else
{
upper_index_of_A=lower_index_of_A+rows_per_process;
// fprintf (stderr, "Current upper Index of A %s",upper_index_of_A);
}
fprintf (stderr, "Lower index of A %d", lower_index_of_A);
fprintf (stderr, "Upper index of A %d", upper_index_of_A);
fprintf (stderr, "\n\n");
MPI_Send(&lower_index_of_A,1,MPI_INT,i,Master_To_Slave,MPI_COMM_WORLD); //send lower index
MPI_Send(&upper_index_of_A,1,MPI_INT,i,Master_To_Slave+1,MPI_COMM_WORLD);//send upper index
MPI_Send(&a[lower_index_of_A][0],(upper_index_of_A-lower_index_of_A)*num_of_cols_A,MPI_DOUBLE,i,Master_To_Slave+2,MPI_COMM_WORLD);//send rows of A
fprintf (stderr, "Scatter done");
}
MPI_Bcast(&b, num_of_rows_A*num_of_cols_B, MPI_DOUBLE, 0, MPI_COMM_WORLD);
fprintf (stderr, "Broadcast done");
}
else
{
MPI_Recv(&lower_index_of_A, 1, MPI_INT, 0, Master_To_Slave,MPI_COMM_WORLD, &status);
MPI_Recv(&upper_index_of_A, 1, MPI_INT, 0, Master_To_Slave+1,MPI_COMM_WORLD,&status);
MPI_Recv(&a[lower_index_of_A], (upper_index_of_A-lower_index_of_A)*num_of_cols_A, MPI_DOUBLE,0, Master_To_Slave+2,MPI_COMM_WORLD, &status);
//TODO: Add code to implement matrix multiplication (C=AxB) in parallel.
for( i=lower_index_of_A;i<upper_index_of_A;i++)
{
for( j=0;j<num_of_cols_B;j++)
{
for( k=0;k<num_of_rows_B;k++)
{
c[i][j]+=(a[i][k]*b[k][j]);
}
}
}
MPI_Send(&lower_index_of_A, 1, MPI_INT, 0, Slave_To_Master,MPI_COMM_WORLD);
MPI_Send(&upper_index_of_A, 1, MPI_INT, 0, Slave_To_Master+1,MPI_COMM_WORLD);
MPI_Send(&c[lower_index_of_A], (upper_index_of_A-lower_index_of_A)*num_of_cols_B, MPI_DOUBLE,0, Slave_To_Master+2,MPI_COMM_WORLD);
}
//TODO: Gather partial result back to the master process.
if(process_rank==0)
{
for(i=1;i<no_of_processes;i++)
{
MPI_Recv(&lower_index_of_A, 1, MPI_INT, i, Slave_To_Master, MPI_COMM_WORLD, &status);
//receive upper bound from a slave
MPI_Recv(&upper_index_of_A, 1, MPI_INT, i, Slave_To_Master + 1, MPI_COMM_WORLD, &status);
//receive processed data from a slave
MPI_Recv(&c[lower_index_of_A][0], (upper_index_of_A - lower_index_of_A) * num_of_cols_B, MPI_DOUBLE, i, Slave_To_Master + 2, MPI_COMM_WORLD, &status);
}
}
// Take a time stamp. This won't happen until after the master
// process has gathered all the input from the other processes.
gettimeofday(&tv2, NULL);
elapsed_time = (tv2.tv_sec - tv1.tv_sec) +
((tv2.tv_usec - tv1.tv_usec) / 1000000.0);
printf ("elapsed_time=\t%lf (seconds)\n", elapsed_time);
// print results
print_results("C = ", c);
}
void print_results(char *prompt, float result[N][N])
{
int i, j;
printf ("\n\n%s\n", prompt);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf(" %.2f", result[i][j]);
}
printf ("\n");
}
printf ("\n\n");
}
Whenever this type of thing happens, I fire up a debugger.
I always recommend the parallel debugger Allinea DDT, though I am biased as I am on the team developing it, and it helps find this kind of bug. You can also try GDB, but that will require more manual intervention to handle the multiple processes.
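For example, one common way to run a small job under GDB (assuming an X display is available; ./your_program is a placeholder for your executable) is to start every rank in its own terminal and debugger:
mpirun -np 4 xterm -e gdb ./your_program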
In your case, the code you've posted doesn't segfault with my MPI (Open MPI); it actually hangs at the MPI_Bcast in rank 0, and the rest hang at the MPI_Sends in their branch of the loop. This is because ranks 1 and above are not calling MPI_Bcast: they need to, in order to match the sender, rank 0, and receive the data.
Why not download a debugger and see for yourself - once you've fixed this broadcast mismatch, the debugger will halt your program as soon as you get the segmentation fault that you are looking for and show you where the fault lies.
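A minimal sketch of the fix for that particular mismatch: the broadcast has to be executed by every rank, so it belongs outside the if/else on process_rank (and since b is declared as float, the datatype matching its N*N elements would be MPI_FLOAT rather than MPI_DOUBLE):
/* executed by all ranks, after rank 0 has sent the row bounds and the others have received them */
MPI_Bcast(&b[0][0], N * N, MPI_FLOAT, 0, MPI_COMM_WORLD);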