An error occurred in MPI_Recv while sending an array - c

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char **argv)
{
    int N;
    scanf("%d", &N);
    double *a = (double *)malloc(N * sizeof(double));
    int i, rank, size, tag = 99, tag1 = 100;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
    {
        for(int j=0;j<N;++j)
        {
            a[j] = j+0.1;
        }
        for (i = 1; i < size; i++)
        {
            MPI_Send(&N, 1, MPI_INT, i, tag1, MPI_COMM_WORLD);
            MPI_Send(a, N, MPI_DOUBLE, i, tag, MPI_COMM_WORLD);
        }
    }
    else
    {
        MPI_Recv(&N, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
        MPI_Recv(a, N, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD, &status);
        // for(int j=0;j<N*2;++j)
        //     printf("%d %f\n", rank, a[j]);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    printf("Message from process %d : %f\n", rank, a[rank]);
    MPI_Finalize();
    return 0;
}
I'm creating the array 'a' in the 0th process and sending it to the remaining processes, but I'm getting the following error when I do this.
[nikhil:8599] *** An error occurred in MPI_Recv
[nikhil:8599] *** reported by process [4228579329,1]
[nikhil:8599] *** on communicator MPI_COMM_WORLD
[nikhil:8599] *** MPI_ERR_BUFFER: invalid buffer pointer
[nikhil:8599] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[nikhil:8599] *** and potentially your MPI job)
[nikhil:08593] 2 more processes have sent help message help-mpi-errors.txt / mpi_errors_are_fatal
[nikhil:08593] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
Can anybody explain why I'm getting this error?
As you can see in the code, there's a for loop containing a print statement that is commented out. The weird thing is that upon uncommenting that loop, it works fine.

Thoughts:
MPI_Init should be the first thing in your program.
Only one rank should call scanf.
N is read with scanf on every rank, but mpirun normally forwards stdin only to rank 0, so on the other ranks N is indeterminate at the point where you call malloc: you are allocating memory of undefined size.
Define variables as close to their point of usage as possible. Putting int i at the top of your function is a disaster waiting to happen.
The barrier at the end is unnecessary.
All the ranks need to allocate their own memory.
That gets us to this code:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char **argv){
    MPI_Init(&argc, &argv);

    const int tag = 99;
    const int tag1 = 100;

    int rank, size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    double *a; //Pointer to the memory we will allocate
    int N;

    if (rank == 0){
        scanf("%d", &N);
        a = (double *)malloc(N * sizeof(double));
        for(int j=0;j<N;++j){
            a[j] = j+0.1;
        }
        for (int i = 1; i < size; i++){
            MPI_Send(&N, 1, MPI_INT, i, tag1, MPI_COMM_WORLD);
            MPI_Send(a, N, MPI_DOUBLE, i, tag, MPI_COMM_WORLD);
        }
    } else {
        MPI_Status status;
        MPI_Recv(&N, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
        //Have to allocate memory on all ranks
        a = (double *)malloc(N * sizeof(double));
        MPI_Recv(a, N, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD, &status);
        // for(int j=0;j<N*2;++j)
        //     printf("%d %f\n", rank, a[j]);
    }

    printf("Message from process %d : %f\n", rank, a[rank]);

    MPI_Finalize();
    return 0;
}
Doing it better
The broadcast command is your friend here:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

//Evaluate the argument once and test the stored return code; testing x again
//would call the MPI function a second time
#define MPI_Error_Check(x) {const int err=(x); if(err!=MPI_SUCCESS) { fprintf(stderr, "MPI ERROR %d at %d.", err, __LINE__);}}

int main(int argc, char **argv){
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Error_Check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
    MPI_Error_Check(MPI_Comm_size(MPI_COMM_WORLD, &size));

    int N;
    if (rank==0){
        scanf("%d", &N);
    }
    MPI_Error_Check(MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD));

    double *a = (double *)malloc(N * sizeof(double));
    if(rank==0){
        for(int j=0;j<N;++j){
            a[j] = j+0.1;
        }
    }

    printf("Message from process %d : N=%d\n", rank, N);

    MPI_Error_Check(MPI_Bcast(a, N, MPI_DOUBLE, 0, MPI_COMM_WORLD));

    fprintf(stderr, "Message from process %d : %f\n", rank, a[rank]);

    free(a);
    MPI_Finalize();
    return 0;
}
Doing It Even Better
The fastest form of communication is no communication at all. In your case, once the value N is known, each rank can recreate the data on its own:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

//As above: store the return code once and test it, rather than re-evaluating x
#define MPI_Error_Check(x) {const int err=(x); if(err!=MPI_SUCCESS) { fprintf(stderr, "MPI ERROR %d at %d.", err, __LINE__);}}

int main(int argc, char **argv){
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Error_Check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
    MPI_Error_Check(MPI_Comm_size(MPI_COMM_WORLD, &size));

    int N;
    if (rank==0){
        scanf("%d", &N);
    }
    MPI_Error_Check(MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD));

    double *a = (double *)malloc(N * sizeof(double));
    for(int j=0;j<N;++j){
        a[j] = j+0.1;
    }

    printf("Message from process %d : N=%d\n", rank, N);
    fprintf(stderr, "Message from process %d : %f\n", rank, a[rank]);

    free(a);
    MPI_Finalize();
    return 0;
}

Related

When trying to broadcast via MPI, my broadcast doesn't seem to actually work

I'm trying to use MPI to broadcast an array to nodes 1 and 2 from node 0. The array has values in it; however, I cannot seem to broadcast it successfully despite trying a few different things based on suggestions found here and elsewhere on the net. When I run this code, which asks for the name of a file in the same directory we're running from (that file is guaranteed to contain only integers, one per line), I end up with "Fatal error: glibc detected an invalid stdio handle". It happens around my MPI_Bcast(M, N, MPI_INT, 0, MPI_COMM_WORLD) line, but I can't seem to pinpoint or correct the problem.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <mpi.h>

int fuelCalc(int array[], int elementCount, int myrank, int worldsize){
    int fuelSum = 0;
    int addlFuelReq = 0;
    printf("Rank %d has array[0] value %d\n", myrank, array[0]);
    for(int i = 0; i < elementCount; i++){
        if( (i % worldsize) == myrank){
            usleep(1000000);
            addlFuelReq = (array[i]/4) - 3;
            if(addlFuelReq < 1){addlFuelReq = 1;}
            fuelSum += addlFuelReq;
        }
    }
    return fuelSum;
}

int main(){
    int i = 0, N = 0;
    char fuelFile[30];
    char comp;
    int totalFuel;
    int myrank, worldsize;
    FILE *file;
    int mysum;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &worldsize);

    if(myrank == 0){
        printf("What file should be used?\n");
        scanf("%s", fuelFile);
        file = fopen(fuelFile, "r");
        if(file == NULL){
            printf("The file entered does not exist.\n");
            return 0;
        }
        for(comp = getc(file); comp!=EOF;comp=getc(file)){
            if(comp == '\n'){
                N = N+1;
            }
        }
        printf("# of Lines: %d\n", N);
    }

    MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("rank %d has N value %d\n", myrank, N);

    int M[N];
    fseek(file, 0, SEEK_SET);
    for( i=0; i<N; i++){
        fscanf(file, "%d\n", &M[i]);
    }
    if(myrank==0){printf("M[0] = %d\n", M[0]);}
    if(myrank==0){printf("Successfully scanned file in to M\n");}

    MPI_Bcast(M, N, MPI_INT, 0, MPI_COMM_WORLD);
    printf("Rank %d has M[0] of %d\n", myrank, M[0]);

    mysum = fuelCalc(M, N, myrank, worldsize);
    if(myrank==0){printf("Successfully sent M, N, myrank, worldsize to other nodes\n");}

    MPI_Reduce(&mysum, &totalFuel,1,MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if(myrank==0){printf("Successfully reduced stuff\n");}

    if(myrank == 0){
        totalFuel = (totalFuel+((totalFuel/100)*10));
        printf("Total fuel required: %d\n", totalFuel);
    }

    MPI_Finalize();
}
As far as I can tell, you only open the file on rank 0, so the call to fseek will fail on all other processes because their file pointers are invalid.
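A minimal sketch of one way to restructure it, reusing the question's variable names (file, comp, myrank) and assuming the intent is for every rank to end up with the full array: do all of the file I/O on rank 0, broadcast the count, and only then broadcast the filled array:
int N = 0;
int *M = NULL;
if (myrank == 0) {
    /* rank 0 is the only process with a valid FILE*, so it alone counts
       the lines, rewinds, and reads the values
       (note: comp should really be an int for the EOF comparison to be reliable) */
    for (comp = getc(file); comp != EOF; comp = getc(file))
        if (comp == '\n') N++;
    fseek(file, 0, SEEK_SET);
}
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
M = malloc(N * sizeof(int));                 /* every rank needs its own buffer */
if (myrank == 0) {
    for (int i = 0; i < N; i++)
        fscanf(file, "%d\n", &M[i]);
    fclose(file);
}
MPI_Bcast(M, N, MPI_INT, 0, MPI_COMM_WORLD); /* broadcast the data, not just the count */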

MPI_Scatter segmentation fault with malloc

Can you explain why this code works perfectly fine:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

typedef struct Point
{
    double x;
    double y;
} point;

int main(int argc, char *argv[]) {
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Datatype mpi_point;
    MPI_Type_contiguous(2, MPI_DOUBLE, &mpi_point);
    MPI_Type_commit(&mpi_point);

    point local[2];

    if(rank==0)
    {
        point *buf;
        buf = malloc(10*sizeof(point));
        for(int i = 0; i<10; i++)
        {
            buf[i].x= (double)i;
            buf[i].y= (double)i+i;
        }
        MPI_Scatter(buf, 2, mpi_point, &local, 2, mpi_point, 0, MPI_COMM_WORLD);
        free(buf);
    }
    else
    {
        point *buf = NULL;
        MPI_Scatter(buf, 2, mpi_point, &local, 2, mpi_point, 0, MPI_COMM_WORLD);
    }

    printf("Hello, process %d has points: 1-> %f %f 2-> %f %f\n", rank, local[0].x, local[0].y, local[1].x, local[1].y);

    MPI_Finalize();
    return 0;
}
and this one below doesn't? It produces a lot of error output, ending with a segmentation fault.
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

typedef struct Point
{
    double x;
    double y;
} point;

int main(int argc, char *argv[]) {
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Datatype mpi_point;
    MPI_Type_contiguous(2, MPI_DOUBLE, &mpi_point);
    MPI_Type_commit(&mpi_point);

    point *local;

    if(rank==0)
    {
        point *buf;
        buf = malloc(10*sizeof(point));
        for(int i = 0; i<10; i++)
        {
            buf[i].x= (double)i;
            buf[i].y= (double)i+i;
        }
        MPI_Scatter(buf, 2, mpi_point, &local, 2, mpi_point, 0, MPI_COMM_WORLD);
        free(buf);
    }
    else
    {
        point *buf = NULL;
        local = malloc(2*sizeof(point));
        MPI_Scatter(buf, 2, mpi_point, &local, 2, mpi_point, 0, MPI_COMM_WORLD);
    }

    printf("Hello, process %d has points: 1-> %f %f 2-> %f %f\n", rank, local[0].x, local[0].y, local[1].x, local[1].y);

    MPI_Finalize();
    return 0;
}
The only difference is that in one case local is defined as an array "directly", and in the other it's a pointer on which I then call malloc. Shouldn't that be the same thing? Both of them are allocated on the stack with the same dimension, so how is it possible that the one with malloc doesn't work?
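No answer is recorded here, but a hedged note on the likely cause: the two are not the same thing. With point local[2], the expression &local happens to have the same address as the array itself, so MPI_Scatter still lands in the right place; with point *local, &local is the address of the 8-byte pointer variable, so MPI_Scatter writes two points over the pointer (and on rank 0 local is never even allocated). A minimal sketch of the pointer version under that reading:
point *local = malloc(2 * sizeof(point));   /* allocate on every rank, rank 0 included */
/* pass 'local', not '&local': with a pointer, '&local' is the address of the
   pointer variable itself, not of the malloc'd buffer it points to */
MPI_Scatter(buf, 2, mpi_point, local, 2, mpi_point, 0, MPI_COMM_WORLD);
free(local);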

MPI sum of array receive working for only one rank

I am trying to find the sum of an array of 100 elements using MPI, under the restriction of only using MPI_Send and MPI_Recv. The code I have written finds the partial sum on each processor, but when sending the results back to the main processor (rank 0), my code only receives from one processor.
My Code
#include "stdafx.h"
#include <stdio.h>
#include <string.h>
#include "mpi.h"
#include "math.h"
int val = 1;
int main(int argc, char* argv[]) {
int my_rank;
int p;
int ierr;
int i;
int a[100];
int q=0;
for (i = 0; i <100; i++)
{
a[i] = i+1;
}
int send,recv;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
int part = 100 /(p-1);
if (my_rank == 0)
{
for (i = 1; i < p; i++)
{
send = part * (i-1);
MPI_Send(&send, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
}
}
else
{
MPI_Recv(&recv, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
for (i = recv; i < recv + part; i++)
{
val = val+a[i];
}
printf("%d\n", val);
MPI_Send(&val, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
if (my_rank == 0)
{
MPI_Recv(&val, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
printf("%d", val);
q = q + val;
}
MPI_Finalize();
if (my_rank == 0)
{
printf("The output is %d\n", q);
}
return 0;
}
My output
Where am I going wrong?
Because you only receive the result from one process. To receive all results, iterate over the process ranks:
if (my_rank == 0)
{
    for (rank = 1; rank < proc_cnt; rank++)
    {
        MPI_Recv(&val, 1, MPI_INT, rank, 0, MPI_COMM_WORLD, &status);
        printf("value of rank %d is %d", rank, val);
        q = q + val;
    }
}
Ordinarily, this is bad practice and may lead to deadlocks. Use MPI_Gather() if allowed.
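For reference, a minimal sketch of the collective alternative that advice points at; MPI_Reduce sums the per-rank values directly on rank 0 (this assumes every rank, rank 0 included, keeps its partial sum in val and initializes it to 0 rather than 1):
int total = 0;
/* each rank contributes its local partial sum; rank 0 receives the grand total */
MPI_Reduce(&val, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (my_rank == 0)
    printf("The output is %d\n", total);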

For some reason MPI_Waitall gets stuck (in a deadlock I believe) when I test my program with big numbers

For some reason MPI_Waitall waits forever when I enter 10000 as the length of the sequence. Basically, I create 4 lists of length n/4 (where in this case n is 10000), and I am using non-blocking sends so that my process 0 does not wait for each list to be sent separately; the lists do not share any values, so nothing gets overwritten.
Keep in mind that the program works with smaller numbers like 1000 or 100, but I am not sure why it does not work with 10000+.
Here is my code:
#include "ore_header.h"
int main(int argc, char** argv) {
srand(time(NULL));
int my_rank, p;
void generate_sequence(int *arr, int n);
int subsequence_check(int *arr,int n, int m);
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
int total;
int length;
int flag;
int seq_length;
MPI_Status stats[p];
MPI_Request reqs[p];
int p_length=0;
int *buf[p];
if (my_rank == 0) {
printf("Enter length and sequence length\n");
scanf("%d %d",&length, &seq_length);
p_length = length / p;
for (int i = 0; i < p; i++) {
buf[i] = (int*)malloc(p_length*sizeof(int));
generate_sequence(buf[i], p_length);
MPI_Isend(buf[i], p_length, MPI_INT, i, 0, MPI_COMM_WORLD, &reqs[i]);
printf("Data sent to process %d\n", i);
}
MPI_Waitall(p, reqs, stats); //Program wont go past this line
printf("\n\n Data sent to all processes \n\n");
}
MPI_Bcast(&p_length, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&seq_length, 1, MPI_INT, 0, MPI_COMM_WORLD);
buf[my_rank] = (int*)malloc(p_length*sizeof(int));
MPI_Recv(buf[my_rank], p_length, MPI_INT, 0, 0, MPI_COMM_WORLD, &stats[my_rank]);
printf("\nData received on process: %d Length: %d\n",my_rank,p_length);
//for (int i = 0; i < p_length; i++) {
// printf("%d",buf[my_rank][i]);
//}
//printf("\n");
total = subsequence_check(buf[my_rank],p_length,seq_length);
printf("\nI am process: %d\nTotal: %d\n",my_rank,total);
MPI_Finalize();
return (0);
}
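No answer is recorded here, but the usual explanation for this pattern is that rank 0 also MPI_Isends to itself while its matching MPI_Recv is only posted after MPI_Waitall; small messages are buffered by the eager protocol, but at 10000 elements the self-send cannot complete until the receive is posted, so MPI_Waitall blocks forever. A hedged sketch of one restructuring, in which rank 0 simply keeps its own chunk instead of mailing it to itself:
if (my_rank == 0) {
    scanf("%d %d", &length, &seq_length);
    p_length = length / p;
    buf[0] = (int*)malloc(p_length*sizeof(int));
    generate_sequence(buf[0], p_length);            /* rank 0 keeps its chunk locally */
    for (int i = 1; i < p; i++) {                   /* only send to the other ranks   */
        buf[i] = (int*)malloc(p_length*sizeof(int));
        generate_sequence(buf[i], p_length);
        MPI_Isend(buf[i], p_length, MPI_INT, i, 0, MPI_COMM_WORLD, &reqs[i]);
    }
    MPI_Waitall(p - 1, &reqs[1], &stats[1]);        /* no self-send left to get stuck */
}
MPI_Bcast(&p_length, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&seq_length, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (my_rank != 0) {                                 /* rank 0 already has buf[0]      */
    buf[my_rank] = (int*)malloc(p_length*sizeof(int));
    MPI_Recv(buf[my_rank], p_length, MPI_INT, 0, 0, MPI_COMM_WORLD, &stats[my_rank]);
}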

How to use MPI_Bcast and MPI_Gather in a function

I am new to MPI and I am trying to learn how to use MPI_Bcast and MPI_Gather inside a function. If I write the code in the following way, without using a function, it works fine and gives the correct result. The output at the end is:
Printing Result at the end:
localdata[0]:19, localdata[1]:19, localdata[2]:20, localdata[3]:20, localdata[4]:21, localdata[5]:21, localdata[6]:22, localdata[7]:22,
To compile and run with 4 processes, I used the following commands:
:$ mpicc test.c -o test
:$ mpirun -np 4 test
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int i, total;
    int arr[20];
    int localdata[8];

    if (world_rank == 0)
    {
        for(i=0;i<20;i++)
            arr[i]=i;
        MPI_Bcast( &arr, 20, MPI_INT, 0, MPI_COMM_WORLD );
        printf("\n\n* At Process %d received\n", world_rank);
        for(i=0;i<20;i++)
        {
            printf("ARR[%d]:%d, ",i, arr[i]);
            total=arr[i]+world_rank;
        }
        printf("\n");
        localdata[0]=total;
        localdata[1]=total;
        MPI_Gather(localdata, 2, MPI_INT, localdata, 2, MPI_INT,0, MPI_COMM_WORLD);
        printf("\n\nPrinting Result at the end:\n");
        for(i=0;i<8;i++)
        {
            printf("localdata[%d]:%d, ",i, localdata[i]);
        }
        printf("\n\n");
    }
    else
    {
        MPI_Bcast( &arr, 20, MPI_INT, 0, MPI_COMM_WORLD );
        printf("\n\n* At Process %d received\n", world_rank);
        for(i=0;i<20;i++)
        {
            printf("ARR[%d]:%d, ",i, arr[i]);
            total=arr[i]+world_rank;
        }
        printf("\n");
        localdata[0]=total;
        localdata[1]=total;
        MPI_Gather(localdata, 2, MPI_INT, localdata, 2, MPI_INT,0, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}
When I put this code into a function called function_cal, it gives me the following error:
*** Process received signal ***
Signal: Segmentation fault (11)
Signal code: Address not mapped (1)
Failing at address: 0x7fffdeb18040
Following is the code after putting MPI_Bcast and MPI_Gather in a function. Any help will be greatly appreciated.
#include <mpi.h>
#include <stdio.h>

void function_cal(int arr[20], int localdata[8], int world_rank)
{
    int i, total;
    MPI_Bcast( &arr, 20, MPI_INT, 0, MPI_COMM_WORLD );
    printf("\n\n* At Process %d received\n", world_rank);
    for(i=0;i<20;i++)
    {
        printf("ARR[%d]:%d, ",i, arr[i]);
        total=arr[i]+world_rank;
    }
    printf("\n");
    localdata[0]=total;
    localdata[1]=total;
    MPI_Gather(localdata, 2, MPI_INT, localdata, 2, MPI_INT,0, MPI_COMM_WORLD);
}

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int i;
    int arr[20];
    int localdata[8];

    if (world_rank == 0)
    {
        for(i=0;i<20;i++)
            arr[i]=i;
        function_cal(arr, localdata, world_rank);
        printf("\n\nPrinting Result at the end:\n");
        for(i=0;i<8;i++)
        {
            printf("localdata[%d]:%d, ",i, localdata[i]);
        }
        printf("\n\n");
    }
    else
    {
        function_cal(arr, localdata, world_rank);
    }

    MPI_Finalize();
    return 0;
}
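No answer is recorded for this one either, but a hedged observation: inside function_cal the parameter arr has decayed to an int *, so &arr is the address of that local pointer rather than of the caller's 20-int array, and the broadcast of 20 ints tramples the stack. In main, &arr on an int[20] has the same address as the array, which is why the first version appears to work. A minimal sketch of the corrected function under that reading:
void function_cal(int arr[20], int localdata[8], int world_rank)
{
    int i, total = 0;
    /* pass 'arr', not '&arr': inside the function arr is really an int*,
       so '&arr' would be the address of the pointer variable itself */
    MPI_Bcast(arr, 20, MPI_INT, 0, MPI_COMM_WORLD);
    for (i = 0; i < 20; i++)
        total = arr[i] + world_rank;
    localdata[0] = total;
    localdata[1] = total;
    /* strictly, the root should pass MPI_IN_PLACE as the send buffer rather than
       aliasing localdata for both send and receive, but the bcast is the crash here */
    MPI_Gather(localdata, 2, MPI_INT, localdata, 2, MPI_INT, 0, MPI_COMM_WORLD);
}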

Resources