I have a problem with MPI, specifically with MPI_Allgather and MPI_Pack.
I have this structure:
typedef struct{
float a;
int b;
int c[];
}struc_t;
I initialize my structure:
struc_t* test=(struc_t*)malloc(sizeof(struc_t)+200*sizeof(int));
And I would like to send an array of my structures with MPI_Allgather:
int sizeSend,sizeRcv;
char *bufferSend,*bufferRecv;
int positionSend,positionRcv;
MPI_Pack_size(10, MPI_MYTYPE , MPI_COMM_WORLD , &sizeSend);
MPI_Pack_size(10*nbProc, MPI_MYTYPE , MPI_COMM_WORLD , &sizeRcv);
MPI_Status statut;
The code that builds MPI_MYTYPE:
MPI_Aint offsets[3],extent;
int blockcounts[3];
MPI_Datatype oldtypes[3];
MPI_Datatype TAB, MPI_MYTYPE;
MPI_Type_contiguous(nb,MPI_INT,&TAB);
MPI_Type_commit(&TAB);
offsets[0]=0;
oldtypes[0] = MPI_FLOAT;
blockcounts[0] = 1;
MPI_Type_extent(MPI_FLOAT, &extent);
offsets[1]=extent;
oldtypes[1] = MPI_INT;
blockcounts[1] = 1;
MPI_Type_extent(MPI_INT, &extent);
offsets[2]=extent + offsets[1];
oldtypes[2] = TAB;
blockcounts[2] =1;
MPI_Type_struct(3, blockcounts, offsets, oldtypes, &MPI_MYTYPE);
MPI_Type_commit(&MPI_MYTYPE);
I create my pack:
positionSend=0;
positionRcv=0;
bufferSend = (char*) malloc(sizeSend);
bufferRecv = (char*) malloc(sizeRcv);
for(i=0;i<10;i++){
struc_t *elm = getElement(i);
MPI_Pack(&elm->a,1,MPI_FLOAT,bufferSend,sizeSend,&positionSend,MPI_COMM_WORLD);
MPI_Pack(&elm->b,1,MPI_INT,bufferSend,sizeSend,&positionSend,MPI_COMM_WORLD);
MPI_Pack(elm->c,200,MPI_INT,bufferSend,sizeSend,&positionSend,MPI_COMM_WORLD);
}
and the reception:
MPI_Allgather(bufferSend,1, MPI_PACKED, bufferRecv,1,MPI_PACKED, MPI_COMM_WORLD);
for(i=0;i<10*nbProc;i++){
struc_t* recvStruc=(struc_t*)malloc(sizeof(struc_t)+200*sizeof(int));
MPI_Unpack(bufferRecv, sizeRcv, &positionRcv,&recvStruc->a,1, MPI_FLOAT,MPI_COMM_WORLD);
MPI_Unpack(bufferRecv, sizeRcv, &positionRcv,&recvStruc->b,1, MPI_INT,MPI_COMM_WORLD);
MPI_Unpack(bufferRecv, sizeRcv, &positionRcv,recvStruc->c,200, MPI_INT,MPI_COMM_WORLD);
}
But the result in recvStruc is all zeros :( Where is the problem? If you help me, I'll call you a god lol.
Thanks
Why pack your structs? It might make sense if they were variable length, but here you are transmitting the 200 integers anyway. A better solution is to just use the MPI datatypes. That way you have a chance of avoiding memory copies, and if the MPI library does need to pack your data behind the scenes, it can do it automatically.
Here's a working example:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
typedef struct{
float a;
int b;
int c[];
} struc_t;
int main (int argc, char **argv)
{
MPI_Init(&argc, &argv);
int nproc;
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
struc_t *test = malloc(sizeof(struc_t) + 200 * sizeof(int));
MPI_Aint lb, struc_t_size;
MPI_Datatype struc_t_type;
{
int blocklen[] = {1, 1, 200};
MPI_Aint addr[4];
// Take the member addresses from a real allocation;
// MPI_Get_address replaces the deprecated MPI_Address.
MPI_Get_address(test, &addr[0]);
MPI_Get_address(&test->a, &addr[1]);
MPI_Get_address(&test->b, &addr[2]);
MPI_Get_address(&test->c, &addr[3]);
MPI_Aint disp[] = { addr[1] - addr[0],
addr[2] - addr[0],
addr[3] - addr[0] };
MPI_Datatype types[] = {MPI_FLOAT, MPI_INT, MPI_INT};
MPI_Type_create_struct(3, blocklen, disp, types, &struc_t_type);
MPI_Type_commit(&struc_t_type);
}
// MPI_Type_get_extent replaces the deprecated MPI_Type_extent;
// the extent covers the float/int header plus the 200-int payload.
MPI_Type_get_extent(struc_t_type, &lb, &struc_t_size);
// Put our rank in b to verify operation
MPI_Comm_rank(MPI_COMM_WORLD, &test->b);
void *buf = malloc(struc_t_size * nproc);
MPI_Allgather(test, 1, struc_t_type, buf, 1, struc_t_type, MPI_COMM_WORLD);
MPI_Type_free(&struc_t_type);
{
int i;
struc_t *p;
// Verify that everything was received correctly
for (i = 0; i < nproc; i++) {
p = (struc_t *)((char *)buf + struc_t_size * i);
printf("%d %d\n", i, p->b);
}
}
MPI_Finalize();
return 0;
}
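Assuming the file is saved as allgather_struct.c (the name is mine), it can be built and run with mpicc allgather_struct.c -o allgather_struct and mpirun -n 4 ./allgather_struct; each of the four ranks should then print the pairs 0 0 through 3 3, showing that every rank's b value was gathered everywhere.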
I have to send a struct that contains, among other things, a dynamically allocated array of another struct.
The receiver has to merge the received message with its own data and then send the result to another process; that is basically what I want to obtain.
I have implemented the following code.
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h> // for the bool used in mergeDicts()
#include <stddef.h> // for offsetof()
#include <mpi.h>
typedef struct Data {
char character;
int frequency;
} Data;
typedef struct Dictionary {
int charsNr;
Data *data;
} Dictionary;
typedef struct Header {
int id;
int size;
MPI_Datatype *type;
int position;
} Header;
static const int NUM_CHARS = 5;
typedef unsigned char BYTE;
void buildMyType(MPI_Datatype *dictType) {
int blockLengths[] = {1, 1};
MPI_Datatype types[] = {MPI_CHAR, MPI_INT};
MPI_Aint offsets[2];
offsets[0] = offsetof(Data, character);
offsets[1] = offsetof(Data, frequency);
MPI_Type_create_struct(2, blockLengths, offsets, types, dictType);
MPI_Type_commit(dictType);
}
void appendData(Dictionary *dict, Data *data) {
dict->data = realloc(dict->data, sizeof(Data) * (dict->charsNr+1));
dict->data[dict->charsNr] = (struct Data) {.character = data->character, .frequency = data->frequency};
++dict->charsNr;
}
void mergeDicts(Dictionary *dst, Dictionary *src) {
for (int i = 0; i < src->charsNr; i++) {
char character = src->data[i].character;
int frequency = src->data[i].frequency;
bool assigned = false;
for (int j = 0; j < dst->charsNr && !assigned; j++) {
if (dst->data[j].character == character) {
dst->data[j].frequency += frequency;
assigned = true;
}
}
if (!assigned)
appendData(dst, &src->data[i]);
}
}
int getRand(const int from, const int to)
{
int num = (rand() % (to - from + 1)) + from;
return num;
}
void getMessageSize(int *size, int rank, int tag, MPI_Status *status) {
MPI_Probe(rank, tag, MPI_COMM_WORLD, status);
MPI_Get_count(status, MPI_BYTE, size);
}
BYTE* packDictionary(Header *header, Dictionary *dict) {
header->size = sizeof(int) + (sizeof(Data) * dict->charsNr);
BYTE *buffer = malloc(sizeof(BYTE) * (header->size));
header->position = 0;
MPI_Pack(&dict->charsNr, 1, MPI_INT, buffer, header->size, &header->position, MPI_COMM_WORLD);
MPI_Pack(dict->data, dict->charsNr, *header->type, buffer, header->size, &header->position, MPI_COMM_WORLD);
return buffer;
}
void unpackDictionary(Header *header, Dictionary *dict, BYTE *buffer) {
MPI_Unpack(buffer, header->size, &header->position, &dict->charsNr, 1, MPI_INT, MPI_COMM_WORLD);
dict->data = malloc(sizeof(Data) * dict->charsNr);
MPI_Unpack(buffer, header->size, &header->position, dict->data, dict->charsNr, *header->type, MPI_COMM_WORLD);
}
void MyTypeOp(void *in, void *out, int *len, MPI_Datatype *typeptr)
{
Dictionary inDict = {.charsNr = 0, .data = NULL};
Dictionary outDict = {.charsNr = 0, .data = NULL};
int bufferSize = 0;
// how can I get the size of the buffers?
// in other occasion I use the getMessageSize(), but I'm not sure
// if it can be useful here
// how can I get the type of the message, basically the dictType?
Header header = {.id = 0, .size = 0, .type = NULL, .position = 0};
unpackDictionary(&header, &inDict, in);
// I should update the header with the new size
unpackDictionary(&header, &outDict, out);
mergeDicts(&outDict, &inDict);
header.size = 0;
out = packDictionary(&header, &outDict);
}
int main(int argc, char **argv)
{
int proc_number;
int rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &proc_number);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
Dictionary dict = {.charsNr = NUM_CHARS, .data = NULL};
dict.data = malloc(dict.charsNr * sizeof(Data));
// create a random dictionary
// I use for simplicity NUM_CHARS but in real life it will be unknown
// at the beginning and every dictionary can have a different size
for (int i = 0; i < NUM_CHARS; i++) {
int freq = getRand(1, 10);
dict.data[i].frequency = freq;
dict.data[i].character = 'a' + getRand(1, 5) + i + rank;
}
MPI_Datatype dictType;
buildMyType(&dictType);
MPI_Op MyOp;
MPI_Op_create((MPI_User_function *) MyTypeOp, 1, &MyOp);
MPI_Datatype contType;
MPI_Type_contiguous(1, MPI_PACKED, &contType);
MPI_Type_commit(&contType);
Header header = {.id = 0, .size = 0, .type = &dictType, .position = 0};
// when I pack the message I put everything inside a buffer of BYTE
BYTE *buffer = packDictionary(&header, &dict);
BYTE *buffer_rcv = NULL;
MPI_Reduce(buffer, buffer_rcv, 1, contType, MyOp, 0, MPI_COMM_WORLD);
if(rank == 0)
for (int i = 0; i < NUM_CHARS; i++)
printf("%c: %d\n", dict.data[i].character, dict.data[i].frequency);
MPI_Type_free(&contType);
MPI_Type_free(&dictType);
MPI_Op_free(&MyOp);
free(buffer);
free(buffer_rcv);
free(dict.data);
MPI_Finalize();
return 0;
}
Of course, this example cannot run as is.
Do you have any suggestions on how I can do it?
I'm writing the code in C on a Linux machine.
Thanks.
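For reference, here is a minimal standalone sketch of the probe-then-receive pattern that the getMessageSize() helper above hints at (the two-rank layout and the sample values are my own illustrative assumptions, not from the question): the receiver discovers the element count with MPI_Probe/MPI_Get_count and sizes its buffer from that, instead of packing the count into the message.
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <mpi.h>
typedef struct Data {
    char character;
    int frequency;
} Data;
int main(int argc, char **argv)
{
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // Same layout as buildMyType() in the question
    int blockLengths[2] = {1, 1};
    MPI_Datatype types[2] = {MPI_CHAR, MPI_INT};
    MPI_Aint offsets[2] = { offsetof(Data, character), offsetof(Data, frequency) };
    MPI_Datatype dictType;
    MPI_Type_create_struct(2, blockLengths, offsets, types, &dictType);
    MPI_Type_commit(&dictType);
    if (rank == 1) {
        Data src[3] = { {'a', 2}, {'b', 5}, {'c', 1} };
        MPI_Send(src, 3, dictType, 0, 0, MPI_COMM_WORLD);
    } else if (rank == 0) {
        MPI_Status status;
        int incoming;
        // Size the receive buffer from the incoming message itself
        MPI_Probe(1, 0, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, dictType, &incoming);
        Data *recv = malloc(sizeof(Data) * incoming);
        MPI_Recv(recv, incoming, dictType, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (int i = 0; i < incoming; i++)
            printf("%c: %d\n", recv[i].character, recv[i].frequency);
        free(recv);
    }
    MPI_Type_free(&dictType);
    MPI_Finalize();
    return 0;
}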
My main question is how I can associate the members of a custom-defined type/struct with the memory allocated via MPI_Win_allocate_shared(size, disp, ..., &baseptr, &win). Help in either C or Fortran is appreciated! Below I have included a sketch of what I want to do in both C and Fortran.
An example in C is roughly as follows:
struct MyStruct{
int * ptr_int;
double * ptr_dble;
};
int main(){
int n1,n2,n3;
struct MyStruct * data;
// I am looking to use MPI to allocate a struct equivalent to the following:
// data = calloc(n3,sizeof(struct MyStruct))
// for (int i=0;i<n3;i++) {
// data[i].ptr_int = calloc(n1,sizeof(int));
// data[i].ptr_dble = calloc(n2,sizeof(double));}
int w_size,w_rank;
MPI_Init(NULL,NULL);
MPI_Comm_size(MPI_COMM_WORLD,&w_size);
MPI_Comm_rank(MPI_COMM_WORLD,&w_rank);
MPI_Win win;
MPI_Aint size;
void * baseptr;
if (w_rank==0){
size = n3*(sizeof(int)*n1 + sizeof(double)*n2);
MPI_Win_allocate_shared(size,1,MPI_INFO_NULL,MPI_COMM_WORLD,&baseptr,&win);
// Question: how to associate struct * data with win, baseptr?
// Can &win then be initialized by calling data[i].ptr_int[j] = ...?
}else{
MPI_Win_shared_query(...);
// Question: again, how to associated struct * data with win, baseptr?
}
}
Equivalently, an example in Fortran is as follows:
program main
use mpi
implicit none
type MyStruct
integer, allocatable :: ptr_int(:)
real, allocatable :: ptr_dble(:)
end type
integer :: n1,n2,n3
type(MyStruct), allocatable :: data(:)
integer :: w_rank, w_size, ierr
integer :: win
integer(kind=MPI_ADDRESS_KIND) :: size
call mpi_init(ierr)
call mpi_comm_size(mpi_comm_world,w_size,ierr)
call mpi_comm_rank(mpi_comm_world,w_rank,ierr)
if (w_rank==0) then
size = n3*(sizeof(int)*n1 + sizeof(double)*n2)
call mpi_win_allocate_shared(size,1,MPI_INFO_NULL,MPI_COMM_WORLD,baseptr,win)
! Question: how to associate data with win, baseptr?
! Can win then be initialized by calling data(i)%ptr_int(j) = ...?
else
call mpi_win_shared_query(...);
! Question: again, how to associated type(mystruct) data with win, baseptr?
endif
end program main
I solved the C part of the problem, thanks to a similar question posted earlier: MPI-3 Shared Memory for Array Struct. I still need to implement it in Fortran, which is more relevant to my current work.
The key aspect is that one can define a pointer to the struct in each MPI process and use pointer arithmetic to associate the shared memory with the data structure. A complete C solution is given as follows:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
struct product{
int nint;
int ndble;
int * ptr_int;
double * ptr_dble;
};
int main(){
int n1,n2,nproduct;
n1 = 3;
n2 = 4;
nproduct = 2;
struct product * tmp = calloc(nproduct,sizeof(struct product));
// Initiate MPI
int world_size,world_rank;
int disp_unit;
MPI_Win win;
MPI_Aint size;
void * baseptr;
MPI_Init(NULL,NULL);
MPI_Comm_size(MPI_COMM_WORLD,&world_size);
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
printf("Hello world from rank %d out of %d processors\n",world_rank,world_size);
if (world_rank==0){
size = (sizeof(int)*n1 + sizeof(double)*n2) * nproduct;
disp_unit = 1;
MPI_Win_allocate_shared(size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&baseptr, &win);
printf("Success allocation\n");
}
else{
MPI_Win_allocate_shared(0,1,MPI_INFO_NULL,MPI_COMM_WORLD,&baseptr,&win);
MPI_Win_shared_query(win,0,&size,&disp_unit,&baseptr);
printf("Success query\n");
}
// Carve a distinct slice of the shared window out for each element
// (pointing every element at the same offset would alias them all).
char *p = (char *) baseptr;
for (int i=0;i<nproduct;i++){
tmp[i].nint = n1;
tmp[i].ndble = n2;
tmp[i].ptr_int = (int*) p;
p += sizeof(int)*n1;
tmp[i].ptr_dble = (double *) p;
p += sizeof(double)*n2;
}
if (world_rank==0){
//MPI_Win_lock(MPI_LOCK_EXCLUSIVE,0,MPI_MODE_NOCHECK,win);
for (int i =0;i<nproduct;i++){
// initialize data stored in win via tmp
for (int j =0;j<n1;j++){
tmp[i].ptr_int[j] = j;
}
for (int j=0;j<n2;j++){
tmp[i].ptr_dble[j] = 2*(j-3);
}
}
//MPI_Win_unlock(0,win);
}
MPI_Barrier(MPI_COMM_WORLD);
// test
if (world_rank==1){
for (int j =0;j<n1;j++){
printf("%d ",tmp[1].ptr_int[j]);
}
printf("\n");
for (int j=0;j<n2;j++){
printf("%f ",tmp[1].ptr_dble[j]);
}
printf("\n");
}
MPI_Win_free(&win);
MPI_Finalize();
}
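For reference (the file name is my assumption): compiling with mpicc win_struct.c -o win_struct and running mpirun -n 2 ./win_struct should make rank 1 print the integers 0 1 2 and the doubles -6.0 -4.0 -2.0 0.0 that rank 0 wrote, confirming that both ranks address the same window memory through their private struct pointers.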
I've been setting up a four-node MPI cluster with Raspberry Pis. As far as I can tell, I am down to one final major issue: how to send an array of structs from each worker to the manager. I have cropped the code down to the below, but this could take a few tries, as I might have cropped too much. Regardless, I still get the same error (a segfault saying an address is not mapped), so sorry in advance if there's a bit of back and forth.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h> // for offsetof()
#include <mpi.h>
struct ticknrank
{
char * ticker;
int errors;
int rank;
};
int main() //Designed for one master, three slaves
{
// I am under the impression the problem lies somewhere in this beginning section, before the commit.
int my_id;
MPI_Init(NULL,NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
MPI_Status status;
MPI_Datatype types[3] = {MPI_CHAR,MPI_INT,MPI_INT};
MPI_Datatype MPI_ticknrank, MPI_tmp;
int blocklengths[3] ={8,1,1};
MPI_Aint offsets[3];
offsets[0] = offsetof(struct ticknrank,ticker);
offsets[1] = offsetof(struct ticknrank,errors);
offsets[2]= offsetof(struct ticknrank,rank);
MPI_Aint lb, extent;
MPI_Type_create_struct(3,blocklengths, offsets, types, &MPI_tmp);
MPI_Type_get_extent(MPI_tmp, &lb, &extent);
MPI_Type_create_resized(MPI_tmp, lb, extent, &MPI_ticknrank);
MPI_Type_commit(&MPI_ticknrank);
// NOTE: sizeof(ticknrank) = 12, while MPI_Type_size(ticknrank) = 16. Not sure what to do about that.
if(my_id == 0) // meaning this process is a host job
{
//NOTE: NodeThrRes and NodeFouRes can be omitted; I was just lazy and didn't want to delete them
//on my cluster.
int length = 2;
struct ticknrank * NodeTwoRes = (struct ticknrank *)malloc(length * sizeof(struct ticknrank));
struct ticknrank * NodeThrRes = (struct ticknrank *)malloc(length * sizeof(struct ticknrank));
struct ticknrank * NodeFouRes = (struct ticknrank *)malloc(length * sizeof(struct ticknrank));
MPI_Recv(NodeTwoRes, length, MPI_ticknrank,1,MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(NodeThrRes, length, MPI_ticknrank,2,MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(NodeFouRes, length, MPI_ticknrank,3,MPI_ANY_TAG, MPI_COMM_WORLD, &status);
printf("%s\n", NodeTwoRes[0].ticker);
}
else
{
int myLen = 2;
struct ticknrank * results = malloc(myLen * sizeof(struct ticknrank));
results[0].ticker = strdup("FIRST");
results[0].rank = 4;
results[0].errors = 7;
results[1].ticker = strdup("SECON");
results[1].rank = 3;
results[1].errors = 15;
MPI_Send(results,myLen,MPI_ticknrank,0,1,MPI_COMM_WORLD);
}
MPI_Type_free(&MPI_ticknrank);
MPI_Finalize();
return 0;
}
The C struct contains a char * ticker (which is 4 bytes if you are running 32-bit), but the derived datatype describes a char ticker[8], which is indeed 8 bytes.
If you want to send multiple struct ticknrank in one shot, then the data has to be in contiguous memory, which means moving from char * ticker to char ticker[8] and replacing strdup() with strcpy() (it is up to you to make sure there is no buffer overflow).
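Based on that, a minimal sketch of the change, reusing the names from the question (the strncpy() bound-handling is an illustrative choice, not the only one):
struct ticknrank
{
    char ticker[8]; // fixed-size storage, matching blocklengths[0] = 8
    int errors;
    int rank;
};
...
// instead of results[0].ticker = strdup("FIRST");
strncpy(results[0].ticker, "FIRST", sizeof(results[0].ticker) - 1);
results[0].ticker[sizeof(results[0].ticker) - 1] = '\0'; // always terminated, no overflow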
I am trying to program an MPI_Alltoallv using an MPI derived datatype created with MPI_Type_create_struct. I could not find any examples solving this particular problem. Most examples perform communication (Send/Recv) using a single struct member, whereas I am targeting an array of structs. The following is simpler test code that attempts an MPI_Sendrecv operation on an array of structs described by the derived datatype:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <stddef.h>
typedef struct sample{
char str[12];
int count;
}my_struct;
int main(int argc, char **argv)
{
int rank, count;
my_struct *sbuf = (my_struct *) calloc(5, sizeof(my_struct));
my_struct *rbuf = (my_struct *) calloc(5, sizeof(my_struct));
int blens[2];
MPI_Aint displs[2];
MPI_Aint baseaddr, addr1, addr2;
MPI_Datatype types[2];
MPI_Datatype contigs[5];
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
strcpy(sbuf[0].str,"ACTGCCAATTCG");
sbuf[0].count = 10;
strcpy(sbuf[1].str,"ACTGCCCATACG");
sbuf[1].count = 5;
strcpy(sbuf[2].str,"ACTGCCAATTTT");
sbuf[2].count = 6;
strcpy(sbuf[3].str,"CCTCCCAATTCG");
sbuf[3].count = 12;
strcpy(sbuf[4].str,"ACTATGAATTCG");
sbuf[4].count = 8;
blens[0] = 12; blens[1] = 1;
types[0] = MPI_CHAR; types[1] = MPI_INT;
for (int i=0; i<5; i++)
{
MPI_Get_address ( &sbuf[i], &baseaddr);
MPI_Get_address ( &sbuf[i].str, &addr1);
MPI_Get_address ( &sbuf[i].count, &addr2);
displs[0] = addr1 - baseaddr;
displs[1] = addr2 - baseaddr;
MPI_Type_create_struct(2, blens, displs, types, &contigs[i]);
MPI_Type_commit(&contigs[i]);
}
/* send to ourself */
MPI_Sendrecv(sbuf, 5, contigs, 0, 0,
rbuf, 5, contigs, 0, 0,
MPI_COMM_SELF, &status);
for (int i=0; i<5; i++)
MPI_Type_free(&contigs[i]);
MPI_Finalize();
return 0;
}
I get the following warning at compile time:
coll.c(53): warning #810: conversion from "MPI_Datatype={int} *" to "MPI_Datatype={int}" may lose significant bits
MPI_Sendrecv(sbuf, 5, contigs, 0, 0,
^
coll.c(54): warning #810: conversion from "MPI_Datatype={int} *" to "MPI_Datatype={int}" may lose significant bits
rbuf, 5, contigs, 0, 0,
And observe the following error across all processes:
Rank 0 [Thu Jun 16 16:19:24 2016] [c0-0c2s9n1] Fatal error in MPI_Sendrecv: Invalid datatype, error stack:
MPI_Sendrecv(232): MPI_Sendrecv(sbuf=0x9ac440, scount=5, INVALID DATATYPE, dest=0, stag=0, rbuf=0x9ac4a0, rcount=5, INVALID DATATYPE, src=0, rtag=0, MPI_COMM_SELF, status=0x7fffffff6780) failed
Not sure what I am doing wrong. Do I need to further use "MPI_Type_create_resized" to register the "extent"? If so, an example covering the above scenario would really help.
Also my main goal is to perform "MPI_Alltoallv" using a similar array of structs (of size ~ several thousands). Hopefully if I can get the SendRecv to work I can move on to "MPI_Alltoallv".
Any help would be highly appreciated.
The sendtype and recvtype parameters expect a single argument of type MPI_Datatype. What you're passing in is an array of these, i.e. an MPI_Datatype *.
You can only pass one of these array elements at a time to this function. Also note that since the member displacements are identical for every element of sbuf, a single committed datatype describes them all; there is no need for one type per element.
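For illustration, here is a sketch of how the per-element loop above could collapse into one committed type, with MPI_Type_create_resized pinning the extent to sizeof(my_struct) so that arrays of structs stride correctly (this also addresses the resize question; my_struct, sbuf, rbuf and status are assumed from the question's code):
int blens[2] = {12, 1};
MPI_Aint displs[2] = { offsetof(my_struct, str), offsetof(my_struct, count) };
MPI_Datatype types[2] = {MPI_CHAR, MPI_INT};
MPI_Datatype tmp_type, struct_type;
MPI_Type_create_struct(2, blens, displs, types, &tmp_type);
// Force the extent to the C struct size so element i starts at i * sizeof(my_struct)
MPI_Type_create_resized(tmp_type, 0, sizeof(my_struct), &struct_type);
MPI_Type_commit(&struct_type);
MPI_Type_free(&tmp_type);
MPI_Sendrecv(sbuf, 5, struct_type, 0, 0,
             rbuf, 5, struct_type, 0, 0,
             MPI_COMM_SELF, &status);
MPI_Type_free(&struct_type);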
I'm trying to send a struct which has one of the member as a dynamic array, but this array doesn't seem to be sent properly. Any suggestion on how to do this?
This is what I have:
struct bar
{
int a;
int b;
int* c;
};
void defineMPIType(MPI_Datatype* newType, int cLen, struct bar* msg)
{
int blockLengths[3] = {1, 1, cLen};
MPI_Datatype types[3] = {MPI_INT, MPI_INT, MPI_INT};
MPI_Aint offsets[3];
MPI_Aint addrB, addrC;
MPI_Address(&(msg->b), &addrB);
MPI_Address(msg->c, &addrC);
offsets[0] = offsetof(struct bar, a);
offsets[1] = offsetof(struct bar, b);
offsets[2] = addrC - addrB;
MPI_Type_create_struct(3, blockLengths, offsets, types, newType);
MPI_Type_commit(newType);
}
void main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
int rank, p;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
int cLen = argv[0];
MPI_Datatype MPI_BAR_TYPE;
struct bar* msg = malloc(sizeof(*msg));
msg->c = malloc(sizeof(int) * cLen);
defineMPIType(&MPI_BAR_TYPE, cLen, msg);
if (rank == 0)
{
msg->a = 1;
msg->b = 2;
for (int i = 0; i < cLen; ++i)
msg->c[i] = i;
MPI_Send(msg, 1, MPI_BAR_TYPE, 1, 111, MPI_COMM_WORLD);
}
else
{
MPI_Status stat;
MPI_Recv(msg, 1, MPI_BAR_TYPE, 0, 111, MPI_COMM_WORLD, &stat);
}
printf("Rank %d has c = [", rank);
for (int i = 0; i < cLen; ++i)
printf("%d, ", msg->c[i]);
printf("]\n");
free(msg);
MPI_Type_free(&MPI_BAR_TYPE);
MPI_Finalize();
}
Members a and b got sent properly, but c didn't.
There are a few issues in your code, even ignoring the issue of the type itself:
The first one is that you allocated memory for your c array only on process #0, then you (tried to) send this data to process #1. But process #1 didn't allocate any memory for storing the message, so even if the sending were done correctly, the code would have failed.
Names starting with MPI_ are reserved for the MPI library so you cannot use them as you wish. You have to find another name for your MPI_BAR_TYPE.
This line puzzles me somewhat: int cLen = argv[0]; I imagine you want to read the size of the array to allocate from the command line, in which case it should read something like int cLen = atoi(argv[1]); (leaving aside validity checks, which would need to be properly handled...)
You only test if the process is of rank #0 or not, meaning that if for some reason you launched 3 processes, the process of rank #2 will wait forever for a message from process of rank #0 that will never arrive.
And finally, the array itself: in your code there is a big confusion between the pointer c and the data pointed to by c. Your structure embeds the pointer, but not the memory it points to, so you cannot map the corresponding data into an MPI structure. The most obvious reason is that from one call to the next (or from one process to the next), there is no guarantee that the offset between the address of the structure and the address of the data pointed to by c will be identical (indeed, it is almost guaranteed to differ). So you cannot reliably map them.
What you need to do to solve your problem is therefore to transfer only your two integers a and b in one go (possibly creating an MPI structure for transferring arrays of them if needed). Then you transfer the memory pointed to by c, which you will have allocated beforehand.
Your code could become for example:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
struct bar
{
int a;
int b;
int* c;
};
void defineMPIType( MPI_Datatype* newType ) {
struct bar tmp[2];
MPI_Aint addr0, addr1, extent;
// Measure the stride between consecutive array elements in bytes
// (a raw pointer difference would count elements, not bytes)
MPI_Get_address( &tmp[0], &addr0 );
MPI_Get_address( &tmp[1], &addr1 );
extent = addr1 - addr0;
// a and b are two contiguous ints, so MPI_2INT describes them both;
// resizing makes the type's extent match sizeof( struct bar )
MPI_Type_create_resized( MPI_2INT, 0, extent, newType );
MPI_Type_commit( newType );
}
int main( int argc, char* argv[] ) {
MPI_Init(&argc, &argv);
int rank, p;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
MPI_Comm_size( MPI_COMM_WORLD, &p );
int cLen = atoi( argv[1] );
MPI_Datatype Bar_type;
defineMPIType( &Bar_type );
struct bar msg;
msg.c = ( int* ) malloc( sizeof( int ) * cLen );
if ( rank == 0 ) {
msg.a = 1;
msg.b = 2;
for ( int i = 0; i < cLen; ++i ) {
msg.c[i] = i;
}
MPI_Send( &msg, 1, Bar_type, 1, 111, MPI_COMM_WORLD );
MPI_Send( msg.c, cLen, MPI_INT, 1, 222, MPI_COMM_WORLD );
}
else if ( rank == 1 ) {
MPI_Recv( &msg, 1, Bar_type, 0, 111, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
MPI_Recv( msg.c, cLen, MPI_INT, 0, 222, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
}
printf("Rank %d has a = %d, b = %d, c = [", rank, msg.a, msg.b );
for ( int i = 0; i < cLen - 1; ++i ) {
printf( "%d, ", msg.c[i] );
}
printf( "%d]\n", msg.c[cLen - 1] );
free( msg.c );
MPI_Type_free( &Bar_type );
MPI_Finalize();
return 0;
}
Which gives:
$ mpirun -n 2 ./a.out 3
Rank 0 has a = 1, b = 2, c = [0, 1, 2]
Rank 1 has a = 1, b = 2, c = [0, 1, 2]
Happy MPI coding.