I am trying to send multiple columns of matrix "B" from processor 0 to different processors. I am trying to use MPI_Send, but it is not working. Can someone please help me?
For example, for a square matrix B of size 7 and 3 processors, the columns should be distributed like this:
Processor 0: 3 columns
Processor 1: 2 columns
Processor 2: 2 columns
#include <stdlib.h>
#include <mpi.h>
#include <stdio.h>
#define ERR_BADORDER 255
#define TAG_INIT 31337
#define TAG_RESULT 42
#define DISP_MAXORDER 12
int mm(double *A, double *B, double *C, int n, int n1);
int rc(int rt,int rank, int size);
int main(int argc, char *argv[]) {
double *A, *B, *C,t,tt;
int n = 0, n0, n1, n2, i,ss,sts;
int rank = 0, size = 1,prev,next,k,z,jcol,ix=0,m,j;
MPI_Datatype column;
MPI_Request reqs[4];
MPI_Status stats[2];
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (!rank) {
if (argc > 1) {
n = atoi(argv[1]);
}
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (!n) {
MPI_Finalize();
return 0;
}
n1 = rc(n, rank,size);
n0 = n * n1;
n2 = n * n;
A = (double *) malloc(sizeof(double) * (rank ? n0 : n2));
B = (double *) malloc(sizeof(double) * n2 );
C = (double *) malloc(sizeof(double) * (rank ? n0 : n2));
if (!rank) {
for (i=0; i<n2; i++) {
A[i] = 1.0;
B[i] = 1.0;
}
}
t = MPI_Wtime();
if (!rank) {
ss = n0;
for (i=1; i<size; i++) {
sts = n * rc(n, i, size);
MPI_Send(A + ss, sts, MPI_DOUBLE, i, TAG_INIT,
MPI_COMM_WORLD);
ss += sts;
}
}
else {
MPI_Recv(A, n0, MPI_DOUBLE, 0, TAG_INIT, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
MPI_Type_vector(n,1,n,MPI_DOUBLE, &column);
MPI_Type_commit(&column);
if (!rank) {
for (i=1; i<size; i++) {
for(m=0;m<=i-1;m++)
ix+=rc(n,m,size);
ss=rc(n,i,size);
for(j=ix;j<ss+ix;j++)
MPI_Send(&B[j], 1, column, i, TAG_INIT, MPI_COMM_WORLD);
/* MPI_Send(&B[i+(n-1)*n], 1, column, i, TAG_INIT,
MPI_COMM_WORLD);*/
}
}
else {
printf("hello");
MPI_Recv(B, n, MPI_DOUBLE, 0, TAG_INIT, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
for (i=0; i<n0; i++) {
printf("Processor: %d and matrix %lf \n ",rank, B[i]);
}
for (i=0; i<n0; i++) {
C[i] = 0.0;
}
MPI_Finalize();
return 0;
}
int rc(int rt, int rank, int size) {
return (rt / size) + (rt % size > rank);
}
Please don't name your variables with just two or three letters, because then I can't understand what you want to do.
You can solve the problem in different ways. When n = 7 and I have 3 processes, I send 2 columns to each process other than the master (rank 0).
#include <stdlib.h>
#include <mpi.h>
#include <stdio.h>
#define ERR_BADORDER 255
#define TAG_INIT 31337
#define TAG_RESULT 42
#define DISP_MAXORDER 12
int main(int argc, char *argv[]) {
double *B;
int n = 0;
int rank , size;
int i;
int columnToSend;
MPI_Datatype column;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (!rank)printf ("rank = %d , size = %d\n", rank, size);
if (!rank) {
if (argc > 1) {
n = atoi(argv[1]);
}
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (!n) {
printf ("n = %d!!\n", n);
MPI_Finalize();
return 0;
}
int offset = n%size;
int gap = n/size;
if (!rank)printf ("n = %d, offset = %d , gap = %d\n", n, offset, gap);
MPI_Type_vector(n, gap, n, MPI_DOUBLE, &column); // n blocks of gap consecutive doubles with a stride of n: gap adjacent columns of the n x n row-major matrix
MPI_Type_commit(&column);
B = (double *) malloc(sizeof(double) * n*n );
for (i = 0 ; i < n * n ; i++) {
B[i] = -1.0;
}
if (!rank) {
for (i = 0 ; i < n * n ; i++) {
B[i] = i; // <----- I put i instead of 1.0 so that each element is identifiable
}
for (i=1; i < size; i++) {
columnToSend = gap * i + offset; // index of the first column that goes to rank i (rank 0 keeps the first gap + offset columns)
printf ("columnToSend = %d to i = %d \n", columnToSend, i);
MPI_Send(&B[columnToSend], 1, column, i, TAG_INIT, MPI_COMM_WORLD);
}
}
if (rank) {
printf ("in rank = %d n*gap = %d \n", rank, n*gap);
MPI_Recv(B, n*gap, MPI_DOUBLE, 0, TAG_INIT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (i=0; i < n*gap; i++) {
printf("Processor: %d and matrix %lf \n ",rank, B[i]);
}
}
MPI_Finalize();
return 0;
}
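A note on the layout this produces on the workers (this is a sketch against the code above, with row and col as illustrative names that are not in the original): the column datatype picks gap consecutive doubles out of each of the n rows, so the n*gap values arrive packed row by row, i.e. the receive buffer is an n x gap sub-matrix in row-major order. A worker could therefore index its block like this, inside the if (rank) branch:
for (int row = 0; row < n; row++) {
    for (int col = 0; col < gap; col++) {
        /* element (row, col) of this rank's block of B */
        printf("Processor: %d row %d local col %d value %lf\n",
               rank, row, col, B[row * gap + col]);
    }
}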
Related
I tried running it with Microsoft MPI, but it is not generating the desired output.
#include <stdio.h>
#include <math.h>
#include "mpi.h"
double function(double x) {
// Define the function to be integrated here
return x * sin(x);
}
int main(int argc, char *argv[]) {
double a, b, h, sum, x;
int n, p, rank, size, i;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (rank == 0) {
printf("Enter the number of subintervals (n): ");
scanf("%d", &n);
printf("Enter the number of processes (p): ");
scanf("%d", &p);
}
// Broadcast the inputs to all processes
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&p, 1, MPI_INT, 0, MPI_COMM_WORLD);
a = 0.0;
b = 1.0;
h = (b - a) / n;
sum = 0.0;
// Divide the work among the processes
int interval_per_process = n / p;
int start = rank * interval_per_process;
int end = (rank + 1) * interval_per_process - 1;
for (i = start; i <= end; i++) {
x = a + h * (i + 0.5);
sum = sum + function(x);
}
sum = sum * h;
// Reduce the results from all processes to process 0
double global_sum;
MPI_Reduce(&sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
printf("The value of the integral is %.6lf\n", global_sum);
}
MPI_Finalize();
return 0;
}
So I need output like this: the program should ask for the number of subintervals and processes, and then give the final result, i.e. the value of the integral computed with the midpoint rule.
But instead, when I run it, it displays nothing, and if I enter any number it skips the scanf part and directly produces the output.
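One workaround worth sketching here (this is an assumption about the cause, not something confirmed by the output above): MPI launchers often forward standard input only to rank 0, and sometimes not at all, so an interactive scanf may never see what you type. Reading the parameters from the command line on rank 0 and broadcasting them avoids stdin completely. A minimal sketch, where argv[1] would hold the number of subintervals (this needs <stdlib.h> for atoi, the executable name in the comment is only an example, and the work split would use size from MPI_Comm_size instead of asking for p):
int n = 0;
if (rank == 0 && argc > 1) {
    n = atoi(argv[1]);   /* e.g. mpiexec -n 4 midpoint.exe 1000 */
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);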
I'm practising parallel programming using MPI, and I developed a program that calculates the average of an array and how many numbers of the array are greater or smaller than the average. Unfortunately, when I try to run it on a Linux (Ubuntu) system I get this: *Caught signal 11 (Segmentation fault: address not mapped to object at address 0xff00000107), backtrace(tid: 5699).
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
int main(int argc, char** argv){
int p, plithos, i, k, count_max,count_min, sunexeia;
int *pinakas;
int *final_res;
int loc_matrix[100];
int loc_num;
int my_rank;
int root, local_sum, sum, j, source;
int tag1=50;
int tag2=60;
int tag3=70;
int tag4=80;
float average;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
if (my_rank == 0) {
printf("please give the size of the array: ");
scanf("%d", &plithos);
pinakas = (int *) malloc(plithos * sizeof(int));
for (i = 0; i < plithos; i++) {
printf("Give the %d number: ", i);
scanf("%d", &pinakas[i]);
}
}
root = 0;
MPI_Bcast(&plithos, 1, MPI_INT, root, MPI_COMM_WORLD); // here we are sending the size of the array to the processors
loc_num = plithos / p;
root = 0;
MPI_Scatter(&pinakas, loc_num, MPI_INT, &loc_matrix, loc_num, MPI_INT, root, MPI_COMM_WORLD); // here we are sending the amount of tasks that every processor must have
local_sum=0; // here all processors will calculate their sums
if (my_rank == 0) {
int start = 0;
int end = loc_num;
for (i = start; i < end; i++){
local_sum += pinakas[i];
}
}
else{
int start = my_rank * loc_num;
int end = my_rank + loc_num;
for (i = start; i < end; i++){
local_sum += pinakas[i];
}
}
MPI_Reduce(&local_sum, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); // here the reduce adds up the partial sums from all processors
if (my_rank == 0) { // processor 0 finds the average
average = (double)sum / plithos;
printf("The average is: %f\n", average);
}
root = 0;
MPI_Bcast(&average, 1, MPI_FLOAT, root, MPI_COMM_WORLD); // here we send the average to the remaining processes
if (my_rank = 0){ // here process 0 will count how many elements are greater and smaller than the average
for(i=0; i<plithos; i++){
if(pinakas[i]> average){
count_max = count_max +1;
}
if(pinakas[i]< average){
count_min = count_min +1;
}
}
printf("To plithos ton stoixeion pou exoun megaluteri timi apo tin mesi timi einai: %d", count_max);
printf("To plithos ton stoixeion pou exoun mikroteri timi apo tin mesi timi einai: %d", count_min);
}
MPI_Finalize();
}
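A general note on the MPI_Scatter call shape used above (a sketch for reference, not a complete fix of the program): the send buffer argument must be the pointer to the data itself, not the address of the pointer variable, and after the scatter every rank should loop over its own receive buffer instead of indexing the root's pinakas, which is only allocated on rank 0. With chunk and chunk_len as illustrative names (plithos, p, root, i, local_sum are from the code above), the pattern looks roughly like this:
int chunk_len = plithos / p;                    /* elements per rank */
int *chunk = malloc(chunk_len * sizeof(int));   /* every rank allocates its own piece */
MPI_Scatter(pinakas, chunk_len, MPI_INT,        /* send buffer: the data pointer, significant only at the root */
            chunk, chunk_len, MPI_INT,          /* each rank receives into its own buffer */
            root, MPI_COMM_WORLD);
for (i = 0; i < chunk_len; i++)                 /* each rank sums only its own chunk */
    local_sum += chunk[i];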
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
int main(int argc, char **argv)
{
int N;
scanf("%d", &N);
double *a = (double *)malloc(N * sizeof(double));
int i, rank, size, tag = 99, tag1 = 100;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0)
{
for(int j=0;j<N;++j)
{
a[j] = j+0.1;
}
for (i = 1; i < size; i++)
{
MPI_Send(&N, 1, MPI_INT, i, tag1, MPI_COMM_WORLD);
MPI_Send(a, N, MPI_DOUBLE, i, tag, MPI_COMM_WORLD);
}
}
else
{
MPI_Recv(&N, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
MPI_Recv(a, N, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD, &status);
// for(int j=0;j<N*2;++j)
// printf("%d %f\n", rank, a[j]);
}
MPI_Barrier(MPI_COMM_WORLD);
printf("Message from process %d : %f\n", rank, a[rank]);
MPI_Finalize();
return 0;
}
I'm creating the array 'a' in the 0th process and sending it to the remaining processes. But I'm getting the following error upon doing this.
[nikhil:8599] *** An error occurred in MPI_Recv
[nikhil:8599] *** reported by process [4228579329,1]
[nikhil:8599] *** on communicator MPI_COMM_WORLD
[nikhil:8599] *** MPI_ERR_BUFFER: invalid buffer pointer
[nikhil:8599] *** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
[nikhil:8599] *** and potentially your MPI job)
[nikhil:08593] 2 more processes have sent help message help-mpi-errors.txt / mpi_errors_are_fatal
[nikhil:08593] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
Can anybody explain why I'm getting this error?
As you can see in the code, there's a for loop containing a print statement which is commented out. The weird thing is that upon uncommenting that loop, it works fine.
Thoughts:
MPI_Init should be the first thing in your program.
Only one rank should scanf.
N is not communicated across ranks, so you are allocating memory of undefined size.
Define variables as close to their point of usage as possible. Putting int i at the top of your function is a disaster waiting to happen.
The barrier at the end is unnecessary.
All the ranks need to allocate their own memory.
That gets us to this code:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
int main(int argc, char **argv){
MPI_Init(&argc, &argv);
const int tag = 99;
const int tag1 = 100;
int rank, size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
double *a; //Pointer to the memory we will allocate
int N;
if (rank == 0){
scanf("%d", &N);
a = (double *)malloc(N * sizeof(double));
for(int j=0;j<N;++j){
a[j] = j+0.1;
}
for (int i = 1; i < size; i++){
MPI_Send(&N, 1, MPI_INT, i, tag1, MPI_COMM_WORLD);
MPI_Send(a, N, MPI_DOUBLE, i, tag, MPI_COMM_WORLD);
}
} else {
MPI_Status status;
MPI_Recv(&N, 1, MPI_INT, 0, tag1, MPI_COMM_WORLD, &status);
//Have to allocate memory on all ranks
a = (double *)malloc(N * sizeof(double));
MPI_Recv(a, N, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD, &status);
// for(int j=0;j<N*2;++j)
// printf("%d %f\n", rank, a[j]);
}
printf("Message from process %d : %f\n", rank, a[rank]);
MPI_Finalize();
return 0;
}
Doing it better
The broadcast command is your friend here:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define MPI_Error_Check(x) {const int err=x; if(err!=MPI_SUCCESS) { fprintf(stderr, "MPI ERROR %d at %d.", err, __LINE__);}}
int main(int argc, char **argv){
MPI_Init(&argc, &argv);
int rank, size;
MPI_Error_Check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
MPI_Error_Check(MPI_Comm_size(MPI_COMM_WORLD, &size));
int N;
if (rank==0){
scanf("%d", &N);
}
MPI_Error_Check(MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD));
double *a = (double *)malloc(N * sizeof(double));
if(rank==0){
for(int j=0;j<N;++j){
a[j] = j+0.1;
}
}
printf("Message from process %d : N=%d\n", rank, N);
MPI_Error_Check(MPI_Bcast(a, N, MPI_DOUBLE, 0, MPI_COMM_WORLD));
fprintf(stderr, "Message from process %d : %f\n", rank, a[rank]);
free(a);
MPI_Finalize();
return 0;
}
Doing It Even Better
The fastest form of communication is no communication at all. In your case, once the value N is known each rank can recreate the data on its own:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define MPI_Error_Check(x) {const int err=x; if(err!=MPI_SUCCESS) { fprintf(stderr, "MPI ERROR %d at %d.", err, __LINE__);}}
int main(int argc, char **argv){
MPI_Init(&argc, &argv);
int rank, size;
MPI_Error_Check(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
MPI_Error_Check(MPI_Comm_size(MPI_COMM_WORLD, &size));
int N;
if (rank==0){
scanf("%d", &N);
}
MPI_Error_Check(MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD));
double *a = (double *)malloc(N * sizeof(double));
for(int j=0;j<N;++j){
a[j] = j+0.1;
}
printf("Message from process %d : N=%d\n", rank, N);
fprintf(stderr, "Message from process %d : %f\n", rank, a[rank]);
free(a);
MPI_Finalize();
return 0;
}
I'm new to coding and started learning MPI through examples. After executing with mpirun -np 7 ./lab1part2, I encountered this problem.
I read a few other threads with a similar error, but none of them seems to be an identical case to mine. I'm guessing that I made some simple syntax error, but I just can't find it. Thanks in advance for any helpful input!
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
int Global_sum(int my_int, int my_rank, int comm_sz, MPI_Comm comm);
const int MAX_CONTRIB = 20;
int main(void) {
int i, sum, my_int;
int my_rank, comm_sz;
MPI_Comm comm;
int* all_ints = NULL;
MPI_Init(NULL, NULL);
comm = MPI_COMM_WORLD;
MPI_Comm_size(comm, &comm_sz);
MPI_Comm_rank(comm, &my_rank);
srandom(my_rank + 1);
my_int = random() % MAX_CONTRIB;
sum = Global_sum(my_int, my_rank, comm_sz, comm);
if ( my_rank == 0) {
all_ints = malloc(comm_sz*sizeof(int));
MPI_Reduce(&my_int, &all_ints, 1, MPI_INT, MPI_SUM, 0, comm);
printf("Ints being summed:\n ");
for (i = 0; i < comm_sz; i++)
printf("%d ", all_ints[i]);
printf("\n");
printf("Sum = %d\n",sum);
free(all_ints);
} else {
MPI_Reduce(&my_int, &all_ints, 1, MPI_INT, MPI_SUM, 0, comm);
}
MPI_Finalize();
return 0;
}
int Global_sum(
int my_int,
int my_rank,
int comm_sz,
MPI_Comm comm ) {
int partner, recvtemp;
int my_sum = my_int;
unsigned bitmask = 1;
while (bitmask < comm_sz) {
partner = my_rank ^ bitmask;
if (my_rank < partner) {
if (partner < comm_sz) {
MPI_Recv(&recvtemp, 1, MPI_INT, partner, 0, comm, MPI_STATUS_IGNORE);
my_sum += recvtemp;
}
bitmask <<= 1;
} else {
MPI_Send(&my_sum, 1, MPI_INT, partner, 0, comm);
break;
}
}
return my_sum;
}
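A general note on the MPI_Reduce call in main above (offered as a sketch, not as the assignment's intended solution): MPI_Reduce combines one value per element across ranks, so reducing a single int with MPI_SUM produces one summed int at the root and cannot fill all comm_sz slots of all_ints; collecting every rank's my_int into that array is what MPI_Gather does. Also, all_ints is already a pointer to the allocated buffer, so it is passed directly rather than as &all_ints:
/* every rank contributes one int; rank 0 receives comm_sz of them in rank order */
MPI_Gather(&my_int, 1, MPI_INT, all_ints, 1, MPI_INT, 0, comm);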
I am trying to find the sum of an array of 100 elements using MPI, under the restriction of only using MPI_Send and MPI_Recv. The code that I have written finds the partial sum on each processor, but when the results are sent back to the main processor (rank 0), my code only receives from one processor.
My Code
#include "stdafx.h"
#include <stdio.h>
#include <string.h>
#include "mpi.h"
#include "math.h"
int val = 1;
int main(int argc, char* argv[]) {
int my_rank;
int p;
int ierr;
int i;
int a[100];
int q=0;
for (i = 0; i <100; i++)
{
a[i] = i+1;
}
int send,recv;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
int part = 100 /(p-1);
if (my_rank == 0)
{
for (i = 1; i < p; i++)
{
send = part * (i-1);
MPI_Send(&send, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
}
}
else
{
MPI_Recv(&recv, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
for (i = recv; i < recv + part; i++)
{
val = val+a[i];
}
printf("%d\n", val);
MPI_Send(&val, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
if (my_rank == 0)
{
MPI_Recv(&val, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
printf("%d", val);
q = q + val;
}
MPI_Finalize();
if (my_rank == 0)
{
printf("The output is %d\n", q);
}
return 0;
}
My output
Where am I going wrong?
Because you only receive the result from one process. To receive all results, iterate over the process ranks:
if (my_rank == 0)
{
for (rank = 1; rank < proc_cnt; rank++)
{
MPI_Recv(&val, 1, MPI_INT, rank, 0, MPI_COMM_WORLD, &status);
printf("value of rank %d is %d", rank, val);
q = q + val;
}
}
Ordinarily, this is a bad practice and may lead to deadlocks. Use MPI_Gather() if allowed.
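A hedged sketch of the MPI_Gather variant, reusing val, q, p, i, and my_rank from the question (all_vals is an illustrative name, malloc needs <stdlib.h>, and since rank 0 computes no partial sum in the original code its slot is skipped when adding up):
int *all_vals = NULL;
if (my_rank == 0)
{
    all_vals = (int *)malloc(p * sizeof(int));   /* one slot per rank */
}
/* every rank contributes its val; rank 0 receives them all in rank order */
MPI_Gather(&val, 1, MPI_INT, all_vals, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (my_rank == 0)
{
    q = 0;
    for (i = 1; i < p; i++)                      /* skip index 0: rank 0 did no summing */
    {
        q = q + all_vals[i];
    }
    free(all_vals);
}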