Hallo Iam trying to make a simlpe parralel program in C language uing MPI. Program should find maximum in array. Root process should send chunks of array to all processes using MPI_Scatter and then gather results by MPI_Gather. When I run the program i get general error like this:
Perhaps this Unix error message will help:
Unix errno: 14
Bad address
I know that there is some problem with MPI_Scatter and MPI_Gather or with the values I am sending to this functions.
I was trying to find the solution, but I found nothing what could be useful.
Here is my code:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define BUFSIZE 9
int max(int *buf, int N){
int i;
int value = 0;
for(i=0; i<N; i++){
if (buf[i]>value){
value = buf[i];
}
}
return value;
}
int main(int argc, char** argv)
{ int size, rank;
int slave;
int *buf;
int *buf1;
int *buf2;
int i, n, value;
MPI_Status status;
/* Initialize MPI */
MPI_Init(NULL, NULL);
/*
* Determine size in the world group.
*/
MPI_Comm_size(MPI_COMM_WORLD, &size);
if ((BUFSIZE % size) != 0) {
printf("Wrong Bufsize ");
return(0);
}
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank==0) {
buf = (int *)malloc(BUFSIZE*sizeof(int));
buf2 = (int *)malloc(size*sizeof(int));
printf("\n Generated array: \n");
for(i=0; i<BUFSIZE; i++){
buf[i] = rand() % 20;
printf("%d, ", buf[i]);
}
printf("\n");
printf("\n Sending values to processes:");
printf("\n -----------------------------");
}
buf1 = (int *)malloc((BUFSIZE/size)*sizeof(int));
MPI_Scatter(buf, BUFSIZE/size, MPI_INT, buf1, BUFSIZE/size, MPI_INT, 0, MPI_COMM_WORLD);
value = max(&buf1[0], BUFSIZE/size);
printf("\n Max from rocess %d : %d \n", rank, max(&buf1[0], BUFSIZE/size));
MPI_Gather(&value, 1, MPI_INT, buf2, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (rank == 0){
printf("\n Max value: %d", max(&buf2[0], size));
}
MPI_Finalize();
return(0);
}
Initialize your pointers to NULL, and track them.
use buf1 instead of &buf1[0], is more clear.
free your buffers before MPI_Finalize() with:
if(bufferPionter != NULL) free(bufferPionter);
If something is wrong with a pointer will crash in the free call. In the max function, If all your numbers are less than zero the maximun is zero. i fix that.
int max(int *buf, int N){
int i;
int value = N? buf[0] : 0;
for(i=0; i<N; i++){
if (buf[i]>value){
value = buf[i];
}
}
return value;
}
Best regards!
Related
I'm trying to use MPI to broadcast an array to nodes 1 and 2 from node 0. The array has values in it, however, I cannot seem to successfully broadcast the array despite trying a few different things based on suggestions found here and elsewhere on the net. When I run this, which asks for a file name within the same directory we're running the code from (that file is guaranteed to contain only integers, 1-per-line) I end up with a "Fatal error: glibc detected an invalid stdio handle", and it happens around my MPI_Bcast(M, N, MPI_INT, 0, MPI_COMM_WORLD) line, but I can't seem to pinpoint or correct the problem.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <mpi.h>
int fuelCalc(int array[], int elementCount, int myrank, int worldsize){
int fuelSum = 0;
int addlFuelReq = 0;
printf("Rank %d has array[0] value %d\n", myrank, array[0]);
for(int i = 0; i < elementCount; i++){
if( (i % worldsize) == myrank){
usleep(1000000);
addlFuelReq = (array[i]/4) - 3;
if(addlFuelReq < 1){addlFuelReq = 1;}
fuelSum += addlFuelReq;
}
}
return fuelSum;
}
int main(){
int i = 0, N = 0;
char fuelFile[30];
char comp;
int totalFuel;
int myrank, worldsize;
FILE *file;
int mysum;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &worldsize);
if(myrank == 0){
printf("What file should be used?\n");
scanf("%s", fuelFile);
file = fopen(fuelFile, "r");
if(file == NULL){
printf("The file entered does not exist.\n");
return 0;
}
for(comp = getc(file); comp!=EOF;comp=getc(file)){
if(comp == '\n'){
N = N+1;
}
}
printf("# of Lines: %d\n", N);
}
MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);
printf("rank %d has N value %d\n", myrank, N);
int M[N];
fseek(file, 0, SEEK_SET);
for( i=0; i<N; i++){
fscanf(file, "%d\n", &M[i]);
}
if(myrank==0){printf("M[0] = %d\n", M[0]);}
if(myrank==0){printf("Successfully scanned file in to M\n");}
MPI_Bcast(M, N, MPI_INT, 0, MPI_COMM_WORLD);
printf("Rank %d has M[0] of %d\n", myrank, M[0]);
mysum = fuelCalc(M, N, myrank, worldsize);
if(myrank==0){printf("Successfully sent M, N, myrank, worldsize to other nodes\n");}
MPI_Reduce(&mysum, &totalFuel,1,MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if(myrank==0){printf("Successfully reduced stuff\n");}
if(myrank == 0){
totalFuel = (totalFuel+((totalFuel/100)*10));
printf("Total fuel required: %d\n", totalFuel);
}
MPI_Finalize();
}
As far as I can tell, you only open the file on rank 0 so the call to "fseek" will fail on all other processes as the file pointers will be invalid.
I am trying to implement a map where keys are numbers mapping into unique numbers. In other words, each process holds a set of numbers in an array that map into another set of numbers in another array held by the same process. The mappings need to be unique across all the process. I passed around a struct with the mappings to create mappings for each of the processes. However, this is not parallel, as I sequentially send information through processes. I request help from all of you wonderful programmers of the internet for how all processes can look at a specific variable at the same time? The following is the code I am currently working with. Thanks in advance and for all the support I have received till now.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
struct map{ //saves all the mappings
int keys[25];
int values[25];
int grow;
};
struct map rmap; //global map
void mapadd(int key, int value){ //adding values to map
rmap.keys[rmap.grow] = key;
rmap.values[rmap.grow] = value;
rmap.grow++;
}
int mapper(int key){ //get value from key
for(int h=0; h<sizeof(rmap.keys)/sizeof(int); h++){
if(rmap.keys[h] == key){
return rmap.values[h];
}
}
return 0;
}
int finder(int list[], int val, int mem){ //see if a value is in array
for(int l=0; l<mem; l++){
if(list[l] == val){
return 1;
}
}
return 0;
}
int main(int argc, char** argv){
// Initialize the MPI environment
MPI_Init(NULL, NULL);
// Find out rank, size
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
srand(time(0));
rmap.grow = 0;
int dim[world_size];
int maxdim = 0;
for(int s=0; s<world_size; s++){
dim[s] = (rand()%10) + 1;
if(dim[s]>maxdim){
maxdim = dim[s];
}
}
int nums[world_size][maxdim];
int labels[world_size][maxdim];
for(int u=0; u<world_size; u++){
for(int d=0; d<dim[u]; d++){
labels[u][d] = 0;
nums[u][d] = 0;
}
}
for(int t=0; t<world_size; t++){
for(int i=0; i<dim[t]; i++){
nums[t][i] = rand()%26 + 1;
//printf("%d\n", nums[t][i]);
}
}
if(world_rank!=0){
MPI_Recv(&rmap.keys, 25, MPI_INT, world_rank-1, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&rmap.values, 25, MPI_INT, world_rank-1, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for(int j=0; j<dim[world_rank]; j++){
if(labels[world_rank][j] == 0){
if(finder(rmap.keys, nums[world_rank][j], 25)==1){
//printf("%s", "exist");
labels[world_rank][j] = mapper(nums[world_rank][j]);
}
else{
//printf("%s", "not");
labels[world_rank][j] = (rand()%50) + 1;
mapadd(nums[world_rank][j], labels[world_rank][j]);
/*for(int o=0; o<25; o++){
printf("%d - %d", rmap.keys[o], rmap.values[o]);
}*/
}
}
}
if(world_rank<world_size-1){
MPI_Send(&rmap.keys, 25, MPI_INT, world_rank+1, 0, MPI_COMM_WORLD);
MPI_Send(&rmap.values, 25, MPI_INT, world_rank+1, 0, MPI_COMM_WORLD);
}
for(int rank=0; rank<world_size; rank++){
if(rank==world_rank){
for(int k=0; k<dim[rank]; k++){
printf("Process #%d: %d --> %d\n", rank, nums[rank][k], labels[rank][k]);
}
}
}
MPI_Finalize();
return 0;
}
For some reason MPI_Waitall is waiting forever when I enter 10000 as the length for the sequence. Basically I create 4 lists of length n/4 where in this case n is 10000 and I an using non-blocking send so my process 0 does not wait for each list to be sent separately as they do not share any values so they are not overwritten.
Keep in mind that the program works with smaller numbers like 1000 or 100 but I am not sure why it does not work with 10000+.
Here is my code:
#include "ore_header.h"
int main(int argc, char** argv) {
srand(time(NULL));
int my_rank, p;
void generate_sequence(int *arr, int n);
int subsequence_check(int *arr,int n, int m);
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
int total;
int length;
int flag;
int seq_length;
MPI_Status stats[p];
MPI_Request reqs[p];
int p_length=0;
int *buf[p];
if (my_rank == 0) {
printf("Enter length and sequence length\n");
scanf("%d %d",&length, &seq_length);
p_length = length / p;
for (int i = 0; i < p; i++) {
buf[i] = (int*)malloc(p_length*sizeof(int));
generate_sequence(buf[i], p_length);
MPI_Isend(buf[i], p_length, MPI_INT, i, 0, MPI_COMM_WORLD, &reqs[i]);
printf("Data sent to process %d\n", i);
}
MPI_Waitall(p, reqs, stats); //Program wont go past this line
printf("\n\n Data sent to all processes \n\n");
}
MPI_Bcast(&p_length, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&seq_length, 1, MPI_INT, 0, MPI_COMM_WORLD);
buf[my_rank] = (int*)malloc(p_length*sizeof(int));
MPI_Recv(buf[my_rank], p_length, MPI_INT, 0, 0, MPI_COMM_WORLD, &stats[my_rank]);
printf("\nData received on process: %d Length: %d\n",my_rank,p_length);
//for (int i = 0; i < p_length; i++) {
// printf("%d",buf[my_rank][i]);
//}
//printf("\n");
total = subsequence_check(buf[my_rank],p_length,seq_length);
printf("\nI am process: %d\nTotal: %d\n",my_rank,total);
MPI_Finalize();
return (0);
}
I have a code, which counts the average value of integers in MPI:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <assert.h>
// Average method
int compute_avg(int *array, int num_elements) {
int sum = 0;
int i;
for (i = 0; i < num_elements; i++) {
sum += array[i];
}
return sum / num_elements;
}
int main(int argc, char** argv) {
if (argc != 2) {
fprintf(stderr, "Usage: avg num_elements_per_proc\n");
exit(1);
}
int num_elements_per_proc = atoi(argv[1]);
MPI_Init(NULL, NULL);
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
// Create array with integers
int *nums = NULL;
if (world_rank == 0) {
for (int i =0; i<5; i++){
nums[i] = i;
}
}
// Subtable from any processes
int *sub_nums = (int *)malloc(sizeof(int) * num_elements_per_proc);
assert(sub_nums != NULL);
// distribution numbers for all processes
MPI_Scatter(nums, num_elements_per_proc, MPI_INT, sub_nums,
num_elements_per_proc, MPI_INT, 0, MPI_COMM_WORLD);
// Count avg subtable
int sub_avg = compute_avg(sub_nums, num_elements_per_proc);
// Collectiong averages
int *sub_avgs = NULL;
if (world_rank == 0) {
sub_avgs = (int *)malloc(sizeof(int) * world_size);
assert(sub_avgs != NULL);
}
MPI_Gather(&sub_avg, 1, MPI_INT, sub_avgs, 1, MPI_INT, 0, MPI_COMM_WORLD);
// Calculates the overall average
if (world_rank == 0) {
int avg = compute_avg(sub_avgs, world_size);
printf("Avg of all elements is %d\n", avg);
// Obliczenie średniej na danych oryginalnych i wyświetlenie.
int original_data_avg =
compute_avg(nums, num_elements_per_proc * world_size);
printf("Avg computed across original data is %d\n", original_data_avg);
}
// free memory
if (world_rank == 0) {
free(nums);
free(sub_avgs);
}
free(sub_nums);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
When i try to run this (mpirun -c 4 avg 4), i`m getting the error list:
[mangeke-mpi-2431940:03372] * Process received signal *
[mangeke-mpi-2431940:03372] Signal: Segmentation fault (11)
[mangeke-mpi-2431940:03372] Signal code: Address not mapped (1)
[mangeke-mpi-2431940:03372] Failing at address: (nil)
[mangeke-mpi-2431940:03372] * End of error message *
How i can fix this problem?
As Hristo comments, the nums is initialized to NULL. If you explore the core file generated with the debugger, it raises the following statement
Core was generated by `./m 4'. Program terminated with signal SIGSEGV,
Segmentation fault.
#0 0x0000000000408809 in main (argc=2, argv=0x7ffd4fc87e68) at m.cxx:36 36 nums[i] = i;
if you change the following code as shown below you'll get to make it run without segfaulting.
....
// Create array with integers
int nums[num_elements_per_proc]; // <<-- change here
if (world_rank == 0) {
for (int i =0; i<5; i++){
nums[i] = i;
}
}
....
// free memory
if (world_rank == 0) {
// free(nums); // <<-- change here, free not needed
free(sub_avgs);
}
I am trying to find a maximum element of an array using MPI in C language. I have to compare the time it takes to send and calculation of the maximum using vs MPI_Scatter functions. MPI_Send: Here' the code for the MPI_Scatter function it works great:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define lim 20
//returns "a-b" in seconds
double timeval_diff(struct timeval *a, struct timeval *b)
{
return
(double)(a->tv_sec + (double)a->tv_usec/1000000) -
(double)(b->tv_sec + (double)b->tv_usec/1000000);
}
//Array to be divided among the processes
int buf[lim]=
{27,24,3,8,45,10,50,15,10,11,9,48,69,25,19,29,61,72,93,20};
int buf2[lim];
int buf3[lim];
int max;
int main(int argc, char *argv[])
{
struct timeval t_ini, t_fin;
double secs;
int n, myid, numprocs, i,j;
int namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d in %s\n",myid, processor_name);
/*Check Border Conditions */
n=lim/numprocs;
gettimeofday(&t_ini, NULL); //take the time before sending the buffer with Scatter
MPI_Scatter(buf,n, MPI_INT,buf2,n,MPI_INT, 0, MPI_COMM_WORLD);
gettimeofday(&t_fin, NULL);//take the time to complete the send routine
secs = timeval_diff(&t_fin, &t_ini);
MPI_Reduce(buf2,buf3,n, MPI_INT, MPI_MAX, 0,MPI_COMM_WORLD);
if (myid == 0)
{ max = buf3[0];
for (i=1; i<n ; i++)
if (max < buf3[i]) max = buf3[i];
for (i=0; i<n ; i++)
printf("Buf3[%d]= %d \n", i, buf3[i]);
printf("Max number of the array is: %d \n", max);
}
for (i=0; i<n ; i++){
printf("%d,Buf2[%d]= %d \n",myid, i,buf2[i]);}
printf("%.16g milliseconds\n", secs * 1000.0);
MPI_Finalize();
return 0;
}
The problem comes when I try to do the same procedure with the MPI_Send function because I calculated the maximum array elements, what am I doing wrong?:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define lim 20
//returns "a-b" in seconds
double timeval_diff(struct timeval *a, struct timeval *b)
{
return
(double)(a->tv_sec + (double)a->tv_usec/1000000) -
(double)(b->tv_sec + (double)b->tv_usec/1000000);
}
//Array to be divided among the processes
int buf[lim]=
{27,24,3,8,45,10,50,15,10,11,9,48,69,25,19,29,61,72,93,20};
int buf2[lim];
int buf3[lim];
int max;
int main(int argc, char *argv[])
{
struct timeval t_ini, t_fin;
double secs;
int n, myid, numprocs, i,j;
int namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
MPI_Get_processor_name(processor_name,&namelen);
fprintf(stderr,"Process %d in %s\n",myid, processor_name);
/*Check Border Conditions */
n=lim/numprocs;
gettimeofday(&t_ini, NULL); //take the time before sending the buffer with Scatter
for (j=0;j<n;j++){
MPI_Send(buf, lim, MPI_INT, 1, 111, MPI_COMM_WORLD);
}
gettimeofday(&t_fin, NULL);//take the time to complete the send routine
secs = timeval_diff(&t_fin, &t_ini);
if (myid == 0)
{ max = buf3[0];
for (i=1; i<n ; i++)
if (max < buf3[i]) max = buf3[i];
for (i=0; i<n ; i++)
printf("Buf3[%d]= %d \n", i, buf3[i]);
printf("Max number of the array is: %d \n", max);
}
for (i=0; i<n ; i++){
printf("%d,Buf2[%d]= %d \n",myid, i,buf2[i]);}
printf("%.16g milliseconds\n", secs * 1000.0);
MPI_Finalize();
return 0;
}
I wasted some hours watching Where is the fault but I can not see it ... Any help?
you are missing the MPI_Recv call on the other end of your MPI_Send call, these kind of functions are more low level as opposed to the collective scatter, gather, reduce and broadcast functions