Recursive MPI spawns (spawn from spawn) in C

The problem is quite simple: a manager spawns X workers, and each worker spawns 2 more workers if a condition is met. Everything works fine as long as the workers don't spawn any further processes (i.e. the condition is not met), or if only the first "original" worker spawns; it hangs for the other workers.
Manager code:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    int myrank, numworkers, tag = 3, sum = 0, K = 1;
    MPI_Status status;
    MPI_Comm workercomm;

    numworkers = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

    MPI_Comm_spawn("Project_child",
                   MPI_ARGV_NULL, numworkers, MPI_INFO_NULL, 0, MPI_COMM_SELF,
                   &workercomm, MPI_ERRCODES_IGNORE);

    for (int i = 0; i < numworkers; i++) { // sends 1,2,3,... to the workers
        K = i + 1;
        MPI_Send(&K, 1, MPI_INT, i, tag, workercomm);
    }
    for (int i = 0; i < numworkers; i++) { // receives an int back from each worker
        MPI_Recv(&K, 1, MPI_INT, i, tag, workercomm, &status);
        sum += K;
    }
    printf("%d \n", sum);

    MPI_Comm_free(&workercomm);
    MPI_Finalize();
    return 0;
}
Worker code:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    int K, myrank, tag = 3;
    MPI_Status status;
    MPI_Comm parentcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_get_parent(&parentcomm);

    MPI_Recv(&K, 1, MPI_INT, 0, tag, parentcomm, &status); // receive K from the parent
    printf("child %d: k=%d\n", myrank, K);
    K++;

    if (K < 5) { // && myrank==0) {
        MPI_Comm childParentComm;
        MPI_Comm_spawn("Project_child",
                       MPI_ARGV_NULL, 2, MPI_INFO_NULL, 0, MPI_COMM_SELF,
                       &childParentComm, MPI_ERRCODES_IGNORE);
        // problem might be here?^

        // sends K to the first worker_child
        MPI_Send(&K, 1, MPI_INT, 0, tag, childParentComm);
        //!!!!!!! IT HANGS HERE if worker != 0
        K++;
        // sends K+1 to the second worker_child
        MPI_Send(&K, 1, MPI_INT, 1, tag, childParentComm);

        int K1, K2;
        MPI_Recv(&K1, 1, MPI_INT, 0, tag, childParentComm, &status);
        MPI_Recv(&K2, 1, MPI_INT, 1, tag, childParentComm, &status);
        K = K1 + K2;

        MPI_Comm_free(&childParentComm);
    }

    MPI_Send(&K, 1, MPI_INT, 0, tag, parentcomm);
    MPI_Comm_free(&parentcomm);
    MPI_Finalize();
    return 0;
}
OK, I changed the code to simplify it.
I tested the code and the problem is the same: worker 0 spawns additional children (even its child 0 spawns additional children), but the others don't, and they hang at the second send. So with the condition if (K < 5 && myrank == 0) it would work, but that's not what I need.
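For reference, I compile the two programs separately with mpicc and start only the manager under mpirun, e.g. mpirun -np 1 ./Project_manager 3 (the binary names are just what I use locally; the child binary name has to match the "Project_child" string passed to MPI_Comm_spawn). All workers are then created through the spawn calls.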

Related

Error running with more than 16 tasks MPI_ERR_TRUNCATE: message truncated

I have this C code that evaluates a polynomial over arrays of values, and I'm trying to run it on a cluster using MPI.
int main(int argc, char **argv)
{
int id;
int n;
int i, size, arraySize;
double *vet, valor, *vresp, resposta, tempo, a[GRAU + 1];
int hostsize;
char hostname[MPI_MAX_PROCESSOR_NAME];
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Get_processor_name(hostname, &hostsize);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
MPI_Comm_size(MPI_COMM_WORLD, &n);
if (id == 0) // Master
{
MPI_Barrier(MPI_COMM_WORLD);
MPI_Bcast(&a, GRAU, MPI_DOUBLE, 0, MPI_COMM_WORLD);
for (size = TAM_INI; size <= TAM_MAX; size += TAM_INC)
{
tempo = -MPI_Wtime();
for (int dest = 1; dest < n; ++dest)
{
int ini = 0;
int fim = dest * size / (n - 1);
int tam = fim - ini;
MPI_Send(&ini, 1, MPI_INT, dest, 0, MPI_COMM_WORLD);
MPI_Send(&tam, 1, MPI_INT, dest, 0, MPI_COMM_WORLD);
MPI_Send(&x[ini], tam, MPI_DOUBLE, dest, 0, MPI_COMM_WORLD);
ini = fim;
fflush(stdout);
}
int total = 0;
for (int dest = 1; dest < n; ++dest)
{
int ini_escravo;
int tam_escravo;
MPI_Recv(&ini_escravo, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&tam_escravo, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&y[ini_escravo], tam_escravo, MPI_DOUBLE, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
}
tempo += MPI_Wtime();
}
}
else
{ // Slave
MPI_Barrier(MPI_COMM_WORLD);
MPI_Bcast(&a, GRAU, MPI_DOUBLE, 0, MPI_COMM_WORLD);
for (arraySize = TAM_INI; arraySize <= TAM_MAX; arraySize += TAM_INC)
{
int ini, tam;
MPI_Recv(&ini, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&tam, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&x[0], tam, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
for (i = 0; i < tam; ++i)
y[i] = polinomio(a, GRAU, x[i]);
MPI_Send(&ini, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(&tam, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(&y[0], tam, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
fflush(stdout);
}
}
MPI_Finalize();
return 0;
}
The code works fine when I run it with 16 tasks or fewer per node. If I try to run it with 32 tasks (16 per node, on 2 nodes), I get the following message:
[06:272259] *** An error occurred in MPI_Recv
[06:272259] *** reported by process [2965045249,0]
[06:272259] *** on communicator MPI_COMM_WORLD
[06:272259] *** MPI_ERR_TRUNCATE: message truncated
[06:272259] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[06:272259] ***    and potentially your MPI job)
[07][[45243,1],31][btl_tcp.c:559:mca_btl_tcp_recv_blocking] recv(20) failed: Connection reset by peer (104)
Any idea about what I am missing here?
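One common source of MPI_ERR_TRUNCATE with this receive pattern (a hedged guess, not necessarily the only problem): the three MPI_ANY_SOURCE receives in the master's collection loop are not forced to match the same slave, so ini_escravo, tam_escravo and the data buffer can each come from a different sender, and a small receive can then be matched against a larger message. Pinning the follow-up receives to the rank that sent the first message keeps each slave's triplet together, roughly like this (sketch of the master's inner loop only, using the variables from the code above):
// Receive the offset from whichever slave is ready, then lock the rest
// of this iteration to that same slave via status.MPI_SOURCE.
MPI_Recv(&ini_escravo, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
int escravo = status.MPI_SOURCE;
MPI_Recv(&tam_escravo, 1, MPI_INT, escravo, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&y[ini_escravo], tam_escravo, MPI_DOUBLE, escravo, 0, MPI_COMM_WORLD, &status);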

How to sum a 2D array in C using MPI

This is the program I am using to sum all values in a 1D array, and it works correctly. But how do I modify it to work on a 2D array? Imagine variable a is something like a = { {1,2}, {3,4}, {5,6} };.
I tried a few solutions but they are not working, so can someone explain the important changes needed to make it compatible with a 2D array as well?
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
// size of array
#define n 10
int a[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
// Temporary array for slave process
int a2[1000];
int main(int argc, char* argv[])
{
int pid, np,
elements_per_process,
n_elements_recieved;
// np -> no. of processes
// pid -> process id
MPI_Status status;
// Creation of parallel processes
MPI_Init(&argc, &argv);
// find out process ID,
// and how many processes were started
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &np);
// master process
if (pid == 0) {
int index, i;
elements_per_process = n / np;
// check if more than 1 processes are run
if (np > 1) {
// distributes the portion of array
// to child processes to calculate
// their partial sums
for (i = 1; i < np - 1; i++) {
index = i * elements_per_process;
MPI_Send(&elements_per_process,
1, MPI_INT, i, 0,
MPI_COMM_WORLD);
MPI_Send(&a[index],
elements_per_process,
MPI_INT, i, 0,
MPI_COMM_WORLD);
}
// last process adds remaining elements
index = i * elements_per_process;
int elements_left = n - index;
MPI_Send(&elements_left,
1, MPI_INT,
i, 0,
MPI_COMM_WORLD);
MPI_Send(&a[index],
elements_left,
MPI_INT, i, 0,
MPI_COMM_WORLD);
}
// master process add its own sub array
int sum = 0;
for (i = 0; i < elements_per_process; i++)
sum += a[i];
// collects partial sums from other processes
int tmp;
for (i = 1; i < np; i++) {
MPI_Recv(&tmp, 1, MPI_INT,
MPI_ANY_SOURCE, 0,
MPI_COMM_WORLD,
&status);
int sender = status.MPI_SOURCE;
sum += tmp;
}
// prints the final sum of array
printf("Sum of array is : %d\n", sum);
}
// slave processes
else {
MPI_Recv(&n_elements_recieved,
1, MPI_INT, 0, 0,
MPI_COMM_WORLD,
&status);
// stores the received array segment
// in local array a2
MPI_Recv(&a2, n_elements_recieved,
MPI_INT, 0, 0,
MPI_COMM_WORLD,
&status);
// calculates its partial sum
int partial_sum = 0;
for (int i = 0; i < n_elements_recieved; i++)
partial_sum += a2[i];
// sends the partial sum to the root process
MPI_Send(&partial_sum, 1, MPI_INT,
0, 0, MPI_COMM_WORLD);
}
// cleans up all MPI state before exit of process
MPI_Finalize();
return 0;
}
You can simplify a lot by using MPI_Reduce instead of MPI_Send/MPI_Recv:
Reduces values on all processes to a single value
A nice tutorial about that routine can be found here.
So each process contains an array (e.g., process 0 { 1, 2, 3, 4, 5} and process 1 {6, 7, 8, 9, 10 }) and performs the partial sum of that array. In the end, each process uses MPI_Reduce to sum all the partial sums into a single value available to the master process (it could have been another process as well). Have a look at this example:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]) {
    int np, pid;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    int partial_sum = 0;
    if (pid == 0) {
        int a[] = { 1, 2, 3, 4, 5 };
        for (int i = 0; i < 5; i++)
            partial_sum += a[i];
    }
    else if (pid == 1) {
        int a[] = { 6, 7, 8, 9, 10 };
        for (int i = 0; i < 5; i++)
            partial_sum += a[i];
    }

    int sum;
    MPI_Reduce(&partial_sum, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (pid == 0) {
        printf("Sum of array is : %d\n", sum);
    }
    MPI_Finalize();
    return 0;
}
This code only works with 2 processes (and it is kind of silly), but I am using it to showcase the use of MPI_Reduce.
I tried a few solutions but they are not working, so can someone explain the important changes needed to make it compatible with a 2D array as well?
If you adapt your code to use MPI_Reduce as I have shown, then it does not matter whether it is a 1D or a 2D array, because you will first do the partial sum into a single value and then perform the reduction.
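For instance, a minimal sketch for the 2D example from the question (assuming it is run with exactly 3 processes and that each rank simply owns one row; the data layout here is made up for illustration):
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
    int pid;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);

    // The 2D array from the question; run with 3 processes so each rank owns one row.
    int a[3][2] = { {1, 2}, {3, 4}, {5, 6} };

    // Local partial sum over this rank's row only.
    int partial_sum = 0;
    for (int j = 0; j < 2; j++)
        partial_sum += a[pid][j];

    // Combine all partial sums into a single value on rank 0.
    int sum = 0;
    MPI_Reduce(&partial_sum, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (pid == 0)
        printf("Sum of 2D array is : %d\n", sum); // prints 21 for this data

    MPI_Finalize();
    return 0;
}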
Alternatively, you can assign each row to a process, perform an element-wise reduction of the entire array, and then have the master process sum the resulting array.
An example:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]) {
    int np, pid;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &pid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    int size = 5;
    int a[5] = { 1, 2, 3, 4, 5 };
    int sum[5] = { 0 };

    // Element-wise reduction of the whole array onto rank 0.
    MPI_Reduce(a, sum, size, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (pid == 0) {
        int total_sum = 0;
        for (int i = 0; i < size; i++)
            total_sum += sum[i];
        printf("Sum of array is : %d\n", total_sum);
    }
    MPI_Finalize();
    return 0;
}
Output (for two processes):
Sum of array is : 30

Sharing a dynamically allocated 2D array with MPI [duplicate]

This question already has answers here:
Sending and receiving 2D array over MPI
(3 answers)
Closed 2 years ago.
I am trying to share a dynamically allocated 2D array from a master thread to several other threads using MPI in C, from within a function.
A simplified representation of the relevant code is as follows:
//Initialize program, start up the desired number of threads.
//Master thread takes input from user, dynamically allocates and constructs 2d array.
//All threads call method analyze_inputs(**array), which takes the array as input (all threads other than master simply pass NULL as argument)
//The master thread shares the array, along with work division to all other threads:
{//Master thread
MPI_Send(&x, 1, MPI_INT, recievingThread, 0, MPI_COMM_WORLD);
MPI_Send(&y, 1, MPI_INT, recievingThread, 0, MPI_COMM_WORLD);
MPI_Send(&(array[0][0]), x*y, MPI_INT, recievingThread, 0, MPI_COMM_WORLD);
}
{//Subthreads
MPI_Recv(&x, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&y, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&(array[0][0]), x*y, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
This is a solution I found on this site for sending dynamically allocated 2D arrays, but I get a segmentation fault on the array receive.
How can I do this?
Edit: Minimal reproducible example
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
int analyze_inputs (int x, int y, int** array);
int main (int argc, char **argv)
{
int x = 10;
int y = 8;
int rank;
int **array = NULL;
MPI_Init (&argc, &argv);
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
if (rank == 0)
{
array = malloc(x * sizeof(int*));
for (int i = 0; i < x; i++)
{
array[i] = malloc(y * sizeof(int));
}
for (int i = 0; i < x; i++)
{
for (int j = 0; j < y; j++)
{
array[i][j] = rand();
}
}
}
analyze_inputs(x,y,array);
MPI_Finalize ();
}
int analyze_inputs(int x,int y, int** array)
{
int rank, x_temp, y_temp, **array_temp;
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
if (rank == 0)
{
MPI_Send(&x, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Send(&y, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Send(&(array[0][0]), x*y, MPI_INT, 1, 0, MPI_COMM_WORLD);
}
else
{
MPI_Recv(&x_temp, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&y_temp, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("Works to here.\n");
MPI_Recv(&(array_temp[0][0]), x_temp*y_temp, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("Crashes before here.\n");
}
}
Each row of array is allocated separately in your code, so a simple
MPI_Send(&(array[0][0]), x*y, MPI_INT, 1, 0, MPI_COMM_WORLD);
won't work in this case.
A simple solution is to allocate a single contiguous block of memory like this:
array = malloc(x * sizeof(int*));
array[0] = malloc(y * x * sizeof(int));
for (int i = 1; i < x; i++)
{
    array[i] = array[0] + y * i;
}
Freeing this array then becomes:
free(array[0]);
free(array);
Do not free array[1], array[2], ... in this case, because their memory is already released by free(array[0]);.
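The same applies on the receiving side of the question's analyze_inputs: array_temp is an uninitialized pointer there, so &(array_temp[0][0]) dereferences garbage, which explains the segmentation fault. A minimal sketch of the receive branch, allocating the same contiguous layout before the data arrives (variable names follow the question's code):
MPI_Recv(&x_temp, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&y_temp, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

// Allocate the same contiguous layout on the receiver before the big receive.
array_temp = malloc(x_temp * sizeof(int*));
array_temp[0] = malloc(x_temp * y_temp * sizeof(int));
for (int i = 1; i < x_temp; i++)
    array_temp[i] = array_temp[0] + y_temp * i;

MPI_Recv(&(array_temp[0][0]), x_temp * y_temp, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);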

Vigenere cipher on MPI: error on MPI_COMM_WORLD, cannot encrypt or decrypt

I'm a newbie using Open MPI, and I want to use MPI to implement the Vigenère cipher. My problems are:
1. The output doesn't even line up.
2. After I enter the words and the key that I want to encrypt, this error comes up:
[mpi-VirtualBox:1646] *** An error occurred in MPI_Recv
[mpi-VirtualBox:1646] *** reported by process [2610495489,0]
[mpi-VirtualBox:1646] *** on communicator MPI_COMM_WORLD
[mpi-VirtualBox:1646] *** MPI_ERR_RANK: invalid rank
[mpi-VirtualBox:1646] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[mpi-VirtualBox:1646] *** and potentially your MPI job)
Here is my code:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include<string.h>
#include <time.h>
void my_bcast(void* data, int count, MPI_Datatype datatype, int root,
MPI_Comm communicator) {
int world_rank;
MPI_Comm_rank(communicator, &world_rank);
int world_size;
MPI_Comm_size(communicator, &world_size);
if (world_rank == root) {
// If we are the root process, send our data to everyone
int i;
for (i = 0; i < world_size; i++) {
if (i != world_rank) {
MPI_Send(data, count, datatype, i, 0, communicator);
}
}
} else {
// If we are a receiver process, receive the data from the root
MPI_Recv(data, count, datatype, root, 0, communicator, MPI_STATUS_IGNORE);
}
}
int k,j,lenPesan, lenKunci;
char pesan[1000];
char kunci[1000];
char kunciBaru[1000];
char encryptedPesan[1000];
char decryptedPesan[1000];
int main(int argc, char** argv) {
MPI_Init(NULL, NULL);
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
if (world_rank == 0) {
int i;
for (i=0; i<world_size;i++){
printf("Program Vigenere Cipher\n");
printf("Encryption dan Decryption\n");
printf("Menggunakan implementasi Message Passing Computing\n");
printf("--------------------------------------------------\n");
printf("Masukkan Pesan (huruf besar tanpa spasi) = ");
scanf("%s",pesan);
printf("\nMasukkan Key (huruf kecil tanpa spasi) = ");
scanf("%s",kunci);
char kunciBaru[lenPesan],encryptedPesan[lenPesan],decryptedPesan[lenPesan];
lenPesan = strlen(pesan);
lenKunci = strlen(kunci);
for(k = 0, j = 0; k < lenPesan; ++k, ++j){
if(j == lenKunci)
j = 0;
kunciBaru[k] = kunci[j];
}
kunciBaru[k] = '\0';
MPI_Status status;
my_bcast(&pesan, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&kunci, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&lenPesan, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&lenKunci, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&kunciBaru, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
MPI_Recv(&encryptedPesan, 1, MPI_CHAR, i, 0, MPI_COMM_WORLD, &status);
MPI_Recv(&decryptedPesan, 1, MPI_CHAR, i, 0, MPI_COMM_WORLD, &status);
}
printf("Original Message: %s", pesan);
printf("\nKey: %s", kunci);
printf("\nNew Generated Key: %s", kunciBaru);
printf("\nEncrypted Message: %s", encryptedPesan);
printf("\nDecrypted Message: %s\n", decryptedPesan);
return 0;
} else {
my_bcast(&pesan, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&kunci, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&lenPesan, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&lenKunci, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(&kunciBaru, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
for(k = 0; k < lenPesan; ++k)
encryptedPesan[k] = ((pesan[k] + kunciBaru[k]) % 26) + 'A';
encryptedPesan[k] = '\0';
MPI_Send(&encryptedPesan, 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
for(k = 0; k < lenPesan; ++k)
decryptedPesan[k] = (((encryptedPesan[k] - kunciBaru[k]) + 26) % 26) + 'A';
decryptedPesan[k] = '\0';
MPI_Send(&decryptedPesan, 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
}
MPI_Finalize();
}
So far I've tried changing the rank of the source process to root, but the problem still exists, and changing the rank of the source to i gives the same problem.
I know it's a mess, please don't judge me. If someone can help I will appreciate it a lot, sorry for my bad language. Thanks.
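One thing that is definitely off, independent of the MPI_ERR_RANK message (an observation about the posted code, not a complete fix): every my_bcast, MPI_Send and MPI_Recv uses a count of 1, so at most a single character is ever transferred, and the integer lengths lenPesan/lenKunci are broadcast as MPI_CHAR. Broadcasting the lengths first as MPI_INT and then the full strings would look roughly like this (sketch only, reusing the question's my_bcast and variables):
// Lengths first, as integers, so the workers know how many chars follow.
my_bcast(&lenPesan, 1, MPI_INT, 0, MPI_COMM_WORLD);
my_bcast(&lenKunci, 1, MPI_INT, 0, MPI_COMM_WORLD);

// Then the full buffers, including the terminating '\0'.
my_bcast(pesan, lenPesan + 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(kunci, lenKunci + 1, MPI_CHAR, 0, MPI_COMM_WORLD);
my_bcast(kunciBaru, lenPesan + 1, MPI_CHAR, 0, MPI_COMM_WORLD);

// The results sent back to rank 0 need the full length as well.
MPI_Send(encryptedPesan, lenPesan + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
MPI_Recv(encryptedPesan, lenPesan + 1, MPI_CHAR, i, 0, MPI_COMM_WORLD, &status);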

Looping through an MPI ring in C

I'm having trouble figuring out how to do multiple iterations in my MPI code. Testing multiple for loops in different spots tells me I am doing something wrong. Part of our instructions is to pass the token around the ring the number of times specified in the command line arguments, but I can't seem to get it to work correctly even by hard-coding the iterations. I left the top for loop in (which doesn't work) but commented out the others.
This is probably a more complicated task than I am anticipating.
Any help is appreciated.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
double starttime, endtime;
MPI_Init(&argc, &argv);
starttime = MPI_Wtime();
int world_rank;
int world_size;
for (int i = 0; i < 3; i++)
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
//pass the token
int token =1000;
//for (int i = 0; i < 3; i++)
//{
if (world_rank == 0)
{
token = 0;
MPI_Send(&token, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Recv(&token, 1, MPI_INT, world_size - 1, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
printf("FINAL TOKEN %d\n", token);
}
else if(world_rank < world_size - 1 )
{
MPI_Recv(&token, 1, MPI_INT, world_rank-1, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
printf("%d has token %d sending to %d\n", world_rank, token,
world_rank+1);
token = token + 2;
MPI_Send(&token, 1, MPI_INT, world_rank+1, 0, MPI_COMM_WORLD);
}
else
{
MPI_Recv(&token, 1, MPI_INT, world_rank-1, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
printf("%d has token %d sending to %d\n", world_rank, token,
world_rank+1);
token = token + 2;
MPI_Send(&token, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
endtime = MPI_Wtime();
printf("That took %f seconds\n",endtime-starttime);
}
MPI_Finalize();
return 0;
}
Here's the compile and run output:
$ mpicc -std=c99 test.c
$ mpirun -np 3 ./a.out
1 has token 0 sending to 2
That took 0.000187 seconds
2 has token 2 sending to 3
That took 0.000145 seconds
FINAL TOKEN 4
That took 0.000180 seconds
FINAL TOKEN 4
That took 0.000212 seconds
2 has token 2 sending to 3
That took 0.000202 seconds
2 has token 2 sending to 3
That took 0.000233 seconds
1 has token 0 sending to 2
That took 0.000291 seconds
1 has token 0 sending to 2
That took 0.000322 seconds
FINAL TOKEN 4
That took 0.000244 seconds
As noted in this comment, the for loop is missing braces {}, and the token would be reset on each iteration of the loop (rank 0 sets token = 0 every time).
Also, you can remove one of your cases and fold the last rank (world_rank == world_size - 1) into the general case by using the modulo operator %.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
    double starttime, endtime;
    MPI_Init(&argc, &argv);
    starttime = MPI_Wtime();

    int world_rank;
    int world_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // pass the token
    int token = 1000;
    for (int i = 0; i < 3; i++)
    {
        if (world_rank == 0)
        {
            // token = 0; removed
            MPI_Send(&token, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
            MPI_Recv(&token, 1, MPI_INT, world_size - 1, 0, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
            printf("FINAL TOKEN %d\n", token);
        }
        else
        {
            MPI_Recv(&token, 1, MPI_INT, world_rank - 1, 0, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
            printf("%d has token %d sending to %d\n", world_rank, token,
                   (world_rank + 1) % world_size); // modified
            token = token + 2;
            MPI_Send(&token, 1, MPI_INT, (world_rank + 1) % world_size, 0, MPI_COMM_WORLD); // modified
        }
        endtime = MPI_Wtime();
        printf("That took %f seconds\n", endtime - starttime);
    }
    MPI_Finalize();
    return 0;
}
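Since the assignment asks for the number of trips around the ring to come from the command line rather than being hard-coded, the loop bound can be read from argv; a small sketch, assuming the count is passed as the first argument (e.g. mpirun -np 3 ./a.out 5):
// Default to 3 trips if no argument is given, otherwise read argv[1].
int iterations = 3;
if (argc > 1)
    iterations = atoi(argv[1]);

for (int i = 0; i < iterations; i++)
{
    // ... same ring send/receive body as in the code above ...
}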
