MPI gathering 2D subarrays - c

I know this has been answered many times before, and there is a comprehensive answer here which I have read and attempted to use, but I just can't get my code to work for some reason.
I have stripped my code down a bit to make it easier to follow. Basically, what I am trying to do is have each process initialise a sub-array and work on it, then put the whole big array back together on rank 0. MPI_Gatherv is giving me a segfault and I cannot figure out why.
Any help would be greatly appreciated.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <mpi.h>

#define N 32

void init_lattice(double **site, int row, int col){
    int i, j;
    for(i = 0; i < row; i++){
        for(j = 0; j < col; j++){
            site[i][j] = (drand48()/4294967295.0 + 0.5)*2*M_PI;
        }
    }
}

int main(int argc, char *argv[]){
    int nprocs, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int dim = 2;
    int grid[dim];
    grid[0] = 0;
    grid[1] = 0;

    // Assign the grid dimensions
    MPI_Dims_create(nprocs, dim, grid);
    printf("Dim grid: length: %d, width: %d\n", grid[0], grid[1]);

    // The new communicator
    MPI_Comm comm_grid;

    // Allow cyclic behavior
    int periodic[dim];
    periodic[0] = 1;
    periodic[1] = 1;

    // Create the communicator
    MPI_Cart_create(MPI_COMM_WORLD, dim, grid, periodic, 0, &comm_grid);

    int block_len, block_width;
    block_len = N/grid[1];
    block_width = N/grid[0];
    int i, j;

    // Create lattice subset
    double *data = (double *) malloc (block_len * block_width * sizeof(double));
    double **site = (double **) malloc (block_len * sizeof(double *));
    for (i = 0; i < block_len; i++)
        site[i] = &(data[i * block_width]);

    // Initialise lattice
    init_lattice(site, block_len, block_width);

    MPI_Datatype newtype, subtype;
    int sizes[dim];
    sizes[0] = N;
    sizes[1] = N;
    int subsizes[dim];
    subsizes[0] = block_len;
    subsizes[1] = block_width;
    int starts[dim];
    starts[0] = 0;
    starts[1] = 0;
    MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_DOUBLE, &newtype);
    MPI_Type_create_resized(newtype, 0, N/grid[1]*sizeof(double), &subtype);
    MPI_Type_commit(&subtype);

    int sendcounts[grid[0]*grid[1]];
    int displs[grid[0]*grid[1]];
    if (rank == 0) {
        for (i = 0; i < grid[0]*grid[1]; i++) sendcounts[i] = 1;
        int disp = 0;
        for (i = 0; i < grid[0]; i++) {
            for (j = 0; j < grid[1]; j++) {
                displs[i*grid[0]+j] = disp;
                disp += 1;
            }
            disp += ((N/grid[1])-1)*grid[0];
        }
    }

    // Create global lattice
    double *global_data = (double *) malloc (N * N * sizeof(double));
    double **global_site = (double **) malloc (N * sizeof(double *));
    for (i = 0; i < N; i++)
        global_site[i] = &(global_data[i * N]);

    MPI_Gatherv(&(site[0][0]), N*N/(grid[0]*grid[1]), MPI_DOUBLE, &(global_site[0][0]), sendcounts, displs, subtype, 0, MPI_COMM_WORLD);

    if(rank == 0){
        printf("Rank: %d\n", rank);
        for(i = 0; i < N; i++){
            for(j = 0; j < N; j++){
                printf("%.2lf ", global_site[i][j]);
            }
            printf("\n");
        }
    }
    return 0;
}
EDIT:
Ok so I have changed my array allocations to contiguous memory and everything is working as it should now. Thanks talonmies!

The fundamental problem here is that MPI expects every buffer it touches to be a contiguous block of memory. Your site and global_site arrays are not; they are arrays of pointers to separately allocated rows. The MPI routines simply read past the end of each individual row allocation, which causes your segfault.
If you want to allocate an n x n array to use with the MPI then you need to replace this:
double **global_site;
if(rank == 0){
    global_site = malloc(sizeof(double *) * N);
    for(i = 0; i < N; i++)
        global_site[i] = malloc(sizeof(double) * N);
}
with something like this:
double *global_site = malloc(sizeof(double)*(N * N));
You will obviously need to adjust the rest of your code accordingly.
It seems the only reason you are actually using arrays of pointers is for the convenience of [i][j]-style 2D indexing. If you use linear or pitched linear memory, you can easily write a little preprocessor macro or helper function that gives you the same style of indexing into row- or column-major ordered storage, which is still compatible with MPI; see the sketch below.
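For example, a minimal sketch of such a helper for row-major storage (the IDX macro is just an illustrative name, not anything from the question's code):

#include <stdio.h>
#include <stdlib.h>

// Hypothetical helper: map 2D (i, j) onto row-major linear storage.
#define IDX(i, j, ncols) ((i) * (ncols) + (j))

int main(void){
    int n = 4;
    double *a = malloc(n * n * sizeof(double));   // one contiguous block, MPI-friendly
    for(int i = 0; i < n; i++)
        for(int j = 0; j < n; j++)
            a[IDX(i, j, n)] = i * 10.0 + j;       // reads almost like a[i][j]
    printf("%g\n", a[IDX(2, 3, n)]);              // prints 23
    free(a);
    return 0;
}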

Related

MPI Scatter Array of Matrices Struct

I have an array of Matrix structs which the program reads from the user's input. I need to distribute the matrices to the processes with OpenMPI. I tried using Scatter, but I am quite confused about the arguments needed for it to work (and also about how to receive the data into each local array). Here is my current code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

#define nil NULL
#define NMAX 100
#define DATAMAX 1000
#define DATAMIN -1000

typedef struct Matrix
{
    int mat[NMAX][NMAX]; // Matrix cells
    int row_eff;         // Matrix effective row
    int col_eff;         // Matrix effective column
} Matrix;

void init_matrix(Matrix *m, int nrow, int ncol)
{
    m->row_eff = nrow;
    m->col_eff = ncol;
    for (int i = 0; i < m->row_eff; i++)
    {
        for (int j = 0; j < m->col_eff; j++)
        {
            m->mat[i][j] = 0;
        }
    }
}

Matrix input_matrix(int nrow, int ncol)
{
    Matrix input;
    init_matrix(&input, nrow, ncol);
    for (int i = 0; i < nrow; i++)
    {
        for (int j = 0; j < ncol; j++)
        {
            scanf("%d", &input.mat[i][j]);
        }
    }
    return input;
}

void print_matrix(Matrix *m)
{
    for (int i = 0; i < m->row_eff; i++)
    {
        for (int j = 0; j < m->col_eff; j++)
        {
            printf("%d ", m->mat[i][j]);
        }
        printf("\n");
    }
}

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    // Get number of processes
    int size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Get process rank
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Get matrices from user inputs
    int kernel_row, kernel_col, num_targets, target_row, target_col;

    // reads kernel's row and column and initializes kernel matrix from input
    scanf("%d %d", &kernel_row, &kernel_col);
    Matrix kernel = input_matrix(kernel_row, kernel_col);

    // reads number of target matrices and their dimensions.
    // initialize array of matrices and array of data ranges (int)
    scanf("%d %d %d", &num_targets, &target_row, &target_col);
    Matrix *arr_mat = (Matrix *)malloc(num_targets * sizeof(Matrix));
    for (int i = 0; i < num_targets; i++)
    {
        arr_mat[i] = input_matrix(target_row, target_col);
    }

    // Get number of matrices per process
    int num_mat_per_proc = ceil(num_targets / size);

    // Init local matrices and scatter the global matrices
    Matrix *local_mats = (Matrix *)malloc(num_mat_per_proc * sizeof(Matrix));
    MPI_Scatter(arr_mat, sizeof(local_mats), MPI_BYTE, &local_mats, sizeof(local_mats), MPI_BYTE, 0, MPI_COMM_WORLD);

    if (rank == 0)
    {
        // Range arrays -> array of convolution results
        int arr_range[num_targets];
        printf("From master \n");
        for (int i = 0; i < 3; i++)
        {
            print_matrix(&arr_mat[i]);
        }
    }
    else
    {
        printf("From slave %d = \n", rank);
        print_matrix(&local_mats[0]);
    }

    MPI_Finalize();
}
So here are a few doubts I have about the current implementation:
Can I accept the input just like that, or should I make sure it only happens on rank 0?
How do I implement the scatter part, possibly using Scatterv, since the number of matrices might not be divisible by the number of processes?
Can I accept the input just like that, or should I make sure it only happens on rank 0?
No. As a best practice, you should pass input via command-line arguments or read it from a file.
If you want to use scanf, call it only on rank 0. STDIN is usually forwarded to rank 0 (as far as I know this is not guaranteed by the standard, so it is implementation dependent, but in practice it should work).
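For illustration, a minimal sketch of that read-on-rank-0 pattern (not code from the question), assuming the dimensions are plain ints that every rank needs:

int kernel_row = 0, kernel_col = 0;
if (rank == 0) {
    // Only rank 0 touches stdin.
    scanf("%d %d", &kernel_row, &kernel_col);
}
// Share the values with every other rank.
MPI_Bcast(&kernel_row, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&kernel_col, 1, MPI_INT, 0, MPI_COMM_WORLD);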
How do I implement the scatter part, possibly using Scatterv, since the number of matrices might not be divisible by the number of processes?
If you need to send different amounts of data to different processes, then you should use MPI_Scatterv.
Scatter syntax:
MPI_Scatter(
    void* send_data,
    int send_count,
    MPI_Datatype send_datatype,
    void* recv_data,
    int recv_count,
    MPI_Datatype recv_datatype,
    int root,
    MPI_Comm communicator)
Your usage:
MPI_Scatter(arr_mat, sizeof(local_mats), MPI_BYTE, &local_mats, sizeof(local_mats), MPI_BYTE, 0, MPI_COMM_WORLD);
Potential error points:
send_count: the size to send (as Gilles Gouaillardet pointed out in the comments). sizeof(local_mats) is just the size of a pointer; it should be num_mat_per_proc * sizeof(Matrix).
recv_count: likewise, I believe the size to receive should not be sizeof(local_mats).
Since you use the same type (MPI_BYTE) for the send and the receive, your send_count == recv_count.
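Putting those points together, a hedged sketch of what the call could look like (note it also passes local_mats rather than &local_mats as the receive buffer, and it assumes num_targets divides evenly by size):

// Each rank receives num_mat_per_proc whole Matrix structs as raw bytes.
MPI_Scatter(arr_mat,    num_mat_per_proc * sizeof(Matrix), MPI_BYTE,
            local_mats, num_mat_per_proc * sizeof(Matrix), MPI_BYTE,
            0, MPI_COMM_WORLD);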

C - Segmentation fault using Scatterv with dynamic 2D array

I'm trying to work with 2D arrays and MPI_Scatterv. When I call MPI_Scatterv I get
================================================================================
= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
= PID 5790 RUNNING AT ubuntu
= EXIT CODE: 139
= CLEANING UP REMAINING PROCESSES
= YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
================================================================================
YOUR APPLICATION TERMINATED WITH THE EXIT STRING: Segmentation fault (signal 11)
This typically refers to a problem with your application.
Please see the FAQ page for debugging suggestions
If I use C99 2D arrays it works, but not with malloc. I want to know where I'm going wrong with malloc. I can't use a linearized 2D array, so I can't build the array as array[i*columns+j].
Here is a test program:
int **alloc2d(int n, int m) {
    int i;
    int **array = malloc(n * sizeof(int*));
    array[0] = malloc(n * m * sizeof(int));
    for(i = 1; i < n; i++)
        array[i] = array[i-1] + m;
    return array;
}

int *genSendc(int dim, int numprocs) {
    int* sendc = (int*)malloc(sizeof(int)*numprocs);
    int i;
    int subsize = dim/numprocs;
    for(i=0; i<numprocs; ++i)
        sendc[i] = subsize;
    for(i=0; i<dim-subsize*numprocs; ++i)
        sendc[i] += 1;
    return sendc;
}

int *genDispl(int numprocs, int *sendc) {
    int* displ = (int*)malloc(sizeof(int)*numprocs);
    int i;
    displ[0] = 0;
    for(i=1; i<numprocs; ++i)
        displ[i] = displ[i-1] + sendc[i-1];
    return displ;
}

int main(int argc, char *argv[]){
    int numprocs, rank, i, j, N=5, M=4;
    int *displMat, *sendcMat;
    int **txMatrix, **rxMatrix;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    sendcMat = genSendc(N, numprocs);
    for(i=0; i<numprocs; ++i)
        sendcMat[i] *= M;
    displMat = genDispl(numprocs, sendcMat);
    rxMatrix = alloc2d(sendcMat[rank]/M, M);

    if (rank == 0) {
        srand(time(NULL));
        txMatrix = alloc2d(N, M);
        for (i=0; i < N; ++i)
            for(j=0; j < M; ++j)
                txMatrix[i][j] = (rand() % 10)+1;
    }

    MPI_Scatterv(&txMatrix[0][0], sendcMat, displMat, MPI_INT, &rxMatrix[0][0], sendcMat[rank], MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Finalize();
}
If I print rxMatrix after MPI_Scatterv, the program prints rank 0's sub-matrix and then crashes with a segmentation fault. Where am I going wrong?
This expression invokes undefined behavior when txMatrix has not been initialized:
&txMatrix[0][0]
On non-root ranks txMatrix is never allocated, and while the value of the first argument to MPI_Scatterv is inconsequential on those ranks*, just evaluating the expression can cause a segfault. Use an if/else for root/non-root and pass NULL for the latter (see the sketch below).
*: at least per the standard, I've seen this be bugged in MPI implementations.
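Not part of the original answer, but a minimal sketch of that fix under the same variable names:

if (rank == 0) {
    // Only the root dereferences txMatrix.
    MPI_Scatterv(&txMatrix[0][0], sendcMat, displMat, MPI_INT,
                 &rxMatrix[0][0], sendcMat[rank], MPI_INT, 0, MPI_COMM_WORLD);
} else {
    // The send arguments are ignored on non-root ranks, so NULL is safe here.
    MPI_Scatterv(NULL, NULL, NULL, MPI_INT,
                 &rxMatrix[0][0], sendcMat[rank], MPI_INT, 0, MPI_COMM_WORLD);
}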

MPI Subarray Sending Error

I first initialize a 4x4 matrix and then try to send its top-left 2x2 block to the slave process using MPI in C. However, the slave process only receives the first row of the block; the second row is filled with whatever happened to be in memory. I couldn't figure out what is missing. The code of the program is below:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define SIZE 4

int main(int argc, char** argv)
{
    int rank, nproc;
    const int root = 0;
    const int tag = 3;
    int** table;
    int* datas;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    datas = malloc(SIZE * SIZE * sizeof(int));
    table = malloc(SIZE * sizeof(int*));
    for (int i = 0; i < SIZE; i++)
        table[i] = &(datas[i * SIZE]);

    for (int i = 0; i < SIZE; i++)
        for (int k = 0; k < SIZE; k++)
            table[i][k] = 0;

    table[0][1] = 1;
    table[0][2] = 2;
    table[1][0] = 3;
    table[2][3] = 2;
    table[3][1] = 3;
    table[3][2] = 4;

    if (rank == root){
        MPI_Datatype newtype;
        int sizes[2] = { 4, 4 };    // size of table
        int subsizes[2] = { 2, 2 }; // size of sub-region
        int starts[2] = { 0, 0 };
        MPI_Type_create_subarray(2, sizes, subsizes, starts, MPI_ORDER_C, MPI_INT, &newtype);
        MPI_Type_commit(&newtype);
        MPI_Send(&(table[0][0]), 1, newtype, 1, tag, MPI_COMM_WORLD);
    }
    else{
        int* local_datas = malloc(SIZE * SIZE * sizeof(int));
        int** local = malloc(SIZE * sizeof(int*));
        for (int i = 0; i < SIZE; i++)
            local[i] = &(local_datas[i * SIZE]);
        MPI_Recv(&(local[0][0]), 4, MPI_INT, root, tag, MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
        for (int i = 0; i < 2; i++){
            for (int k = 0; k < 2; k++)
                printf("%3d ", local[i][k]);
            printf("\n");
        }
    }
    MPI_Finalize();
    return 0;
}
You have instructed the receive operation to place four integer values consecutively in memory, so the 2x2 block arrives as a 1x4 row (since local is 4x4). The second row of local contains random values because that memory is never initialised.
You should either use MPI_Type_create_subarray on both the sender and the receiver so the received data lands in a 2x2 block, or redefine local to be a 2x2 matrix instead of 4x4; a sketch of the second option follows.
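As an illustration (not from the original answer), a minimal sketch of the second option; the variable names follow the question, and the receive buffer is simply shrunk to a contiguous 2x2 block so the four received ints line up with its rows:

// Receive side: a 2x2 local matrix backed by contiguous storage.
int local_datas[2 * 2];
int *local[2] = { &local_datas[0], &local_datas[2] };
MPI_Recv(&(local[0][0]), 4, MPI_INT, root, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (int i = 0; i < 2; i++){
    for (int k = 0; k < 2; k++)
        printf("%3d ", local[i][k]);
    printf("\n");
}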

Dynamic Matrix Multiplication with Pthreads

I'm a beginner with thread programming and C in general, and I'm trying to figure out how to do a simple matrix multiplication with Pthreads. I want to create a thread for every column and put the results in a result matrix. I'm trying to do it dynamically, which means the user provides a size n and the program creates two n x n matrices.
My code right now, excluding filling the matrices and reading the size n, is the following:
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Matrix {
    int line, col, size;
    double (*MA)[];
    double (*MB)[];
    double (*MC)[];
} Matrix;

void *multiply(void *arg) {
    Matrix* work = (Matrix*) arg;
    int s, z;
    s = work->col;
    z = work->line;
    work->MC[0][0] = 0.0; // can't use MC, MB, MA here!!
    return 0;
}

int main() {
    Matrix* m;
    // read size and set it to int size (missing here, does work)
    double MA[size][size], MB[size][size], MC[size][size];
    int i, j;
    // filling the matrices (missing here, does work)
    pthread_t threads[size];
    for (i = 0; i < size; i++) {
        m = malloc(sizeof(Matrix*));
        m->size = size;
        m->col = i;
        pthread_create(&threads[i], NULL, multiply, m);
    }
    for (i = 0; i < size; i++) {
        pthread_join(threads[i], NULL);
    }
    return 0;
}
The problem is that I can't use MA, MB, or MC (the result) in the multiply function in the way shown in the code.
I just get the error "invalid use of array with unspecified bounds", even though I declared all three of them in the main function.
Am I misunderstanding something here, or how can I fix this? I tried to adapt an example from my lecture where a thread is created for every element.
Thanks in advance!
Just about the error:
work->MC[0][0] = 0.0; // can't use MC, MB, MA here!!
MC was declared as double (*MC)[] and you try to use it as a two-dimensional array, as if you had declared it double MC[N][M]. You can only use a two (or more) dimensional array like that if the first dimension is fixed, or if you allocate it row by row.
So your program could be something like this (the matrices are now allocated row by row and the struct holds double ** pointers):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Matrix {
    int line, col, size;
    double **MA;
    double **MB;
    double **MC;
} Matrix;

void *multiply(void *arg) {
    Matrix* work = (Matrix*) arg;
    int s, z;
    s = work->col;
    z = work->line;
    work->MC[0][0] = 0.0;
    return 0;
}

int main() {
    Matrix* m;
    int size;
    // read size and set it to int size (missing here, does work)
    double **MA, **MB, **MC;
    int i, j;
    pthread_t threads[size];
    MA = (double **) malloc(size * sizeof(double *));
    MB = (double **) malloc(size * sizeof(double *));
    MC = (double **) malloc(size * sizeof(double *));
    for(int i = 0; i < size; ++i){
        MA[i] = (double *) malloc(size * sizeof(double));
        MB[i] = (double *) malloc(size * sizeof(double));
        MC[i] = (double *) malloc(size * sizeof(double));
    }
    for (i = 0; i < size; i++) {
        m = malloc(sizeof(Matrix));
        m->MA = MA;
        m->MB = MB;
        m->MC = MC;
        m->size = size;
        m->col = i;
        pthread_create(&threads[i], NULL, multiply, m);
    }
    for (i = 0; i < size; i++) {
        pthread_join(threads[i], NULL);
    }
    return 0;
}
But you must TAKE CARE about threads accessing the data concurrently: if different threads can read and write the same values, you should protect them with locks.
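For what it's worth, a hedged sketch of what the per-column worker might look like under this layout; the function name multiply_column is hypothetical and the body is not from the original answer, but it shows why no lock is needed for this particular division of work (each thread writes only its own column of MC, while MA and MB are only read):

void *multiply_column(void *arg) {
    Matrix *work = (Matrix *) arg;
    int col = work->col;
    int n = work->size;
    for (int row = 0; row < n; row++) {
        double sum = 0.0;
        for (int k = 0; k < n; k++)
            sum += work->MA[row][k] * work->MB[k][col];  // dot product of row and column
        work->MC[row][col] = sum;                        // only this thread touches column 'col'
    }
    return NULL;
}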

How do I use MPI to scatter a 2d array?

I am trying to use MPI to distribute the work for a bucket sort. When I scatter the array, I want each process to receive a single bucket (an int array) and be able to print its contents. However, my current program prints out incorrect values, which makes me think I am not indexing into the memory I want. Can someone explain how I can properly index into the array I am passing to each process, or what I am doing wrong?
#define MAX_VALUE 64
#define N 32

main(int argc, char *argv[]){
    MPI_Init(&argc, &argv); // initialize MPI environment
    int** sendArray = malloc(16*sizeof(int *));
    int *arrayIndex = (int *) malloc(16*sizeof(int));
    int *receiveArray = (int *) malloc(N*sizeof(int));
    int nps, myrank;
    MPI_Comm_size(MPI_COMM_WORLD, &nps);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    int i;

    if(myrank == 0)
    {
        // create an array that stores the number of values in each bucket
        for( i = 0; i < 16; i++){
            arrayIndex[i] = 0;
        }
        int bucket = 0;
        int temp = 0;

        // creates an int array within each array index of sendArray
        for( i = 0; i < 16; i++){
            sendArray[i] = (int *)malloc(N * sizeof(int));
        }

        // Create a random int array with values ranging from 0 to MAX_VALUE
        for(i = 0; i < N; i++){
            temp = rand() % MAX_VALUE;
            bucket = temp/4;
            printf("assigning %d to index [%d][%d]\n", temp, bucket, arrayIndex[bucket]);
            sendArray[bucket][arrayIndex[bucket]] = temp;
            arrayIndex[bucket] = arrayIndex[bucket] + 1;
        }
    }

    MPI_Scatter(sendArray, 16, MPI_INT, receiveArray, N, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(arrayIndex, 16, MPI_INT, 0, MPI_COMM_WORLD);

    printf("bucket %d has %d values\n", myrank, arrayIndex[myrank]);
    for( i = 0; i < arrayIndex[myrank]; i++){
        printf("bucket %d index %d has value %d\n", myrank, i, receiveArray[i]);
    }
}
What you are trying to do doesn't work because MPI only ever sends the data you point to directly. It does not follow the pointers inside your sendArray.
In your example you could just make sendArray bigger, namely 16 * N ints, and put all your data into that one contiguous array. That way you have a one-dimensional array, but that should not be a problem in the code you gave us, because all buckets have the same length, so you can access element j of bucket i with sendArray[i * N + j].
Also, in most cases send_count should be equal to recv_count. In your case that would be N. The correct MPI call would be
MPI_Scatter(sendArray, N, MPI_INT, receiveArray, N, MPI_INT, 0, MPI_COMM_WORLD);
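To make that concrete, a rough sketch of the flattened layout (names follow the question; arrayIndex is assumed to be allocated and zeroed as in the original, and exactly 16 ranks are assumed so each rank gets one bucket):

// One contiguous block: bucket i occupies sendArray[i*N] .. sendArray[i*N + N - 1].
int *sendArray = malloc(16 * N * sizeof(int));
int *receiveArray = malloc(N * sizeof(int));

if (myrank == 0) {
    for (int i = 0; i < N; i++) {
        int temp = rand() % MAX_VALUE;
        int bucket = temp / 4;
        sendArray[bucket * N + arrayIndex[bucket]] = temp;  // element j of bucket i is sendArray[i*N + j]
        arrayIndex[bucket]++;
    }
}
// Each rank gets exactly one bucket of N ints.
MPI_Scatter(sendArray, N, MPI_INT, receiveArray, N, MPI_INT, 0, MPI_COMM_WORLD);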
