I'm new to Open MPI and I don't know how to use scatter and gather to distribute an array of strings across all processes. I would like to split the array and send a part of it to each process, but all I manage to split are the characters of a single array element. Can anyone help me, please?
Here is my code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"
#define MASTER 0
#define BUF_SIZE 2048
#define CHAR_SIZE 900
#define CHARS 13
#define MAX_SIZE 3500
#define NUMBER_OF_FILES 2
int main(int argc, char** argv) {
int number_of_words = 0;
int total_rows = 0;
int i, j = 0;
char **words = (char**) calloc(MAX_SIZE, sizeof (char*));
for (i = 0; i < MAX_SIZE; i++) {
words[i] = (char*) calloc(CHARS, sizeof (char));
}
char **local_words = (char**) calloc(MAX_SIZE, sizeof (char*));
for (i = 0; i < MAX_SIZE; i++) {
local_words[i] = (char*) calloc(CHARS, sizeof (char));
}
char **rec_words = (char**) calloc(MAX_SIZE, sizeof (char*));
for (i = 0; i < MAX_SIZE; i++) {
rec_words[i] = (char*) calloc(CHARS, sizeof (char));
}
char str_righe[BUF_SIZE][CHAR_SIZE];
FILE *f = NULL;
char f_title[10];
char str_nfiles[10];
char delim[10] = {10, 32, 33, 39, 44, 46, 58, 59, 63};
char *ptr;
int rank;
int size;
int message_length;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 1; i <= NUMBER_OF_FILES; i++) {
strcpy(f_title, "f");
sprintf(str_nfiles, "%d", i);
strcat(f_title, str_nfiles);
strcat(f_title, ".txt");
f = fopen(f_title, "r");
while (fgets(str_righe[j], BUF_SIZE, f)) {
str_righe[j][strlen(str_righe[j])] = '\0';
j++;
}
fclose(f);
}
total_rows = j;
for (i = 0; i < total_rows; ++i) {
ptr = strtok(str_righe[i], delim);
while (ptr != NULL) {
strcpy(words[number_of_words], ptr);
ptr = strtok(NULL, delim);
number_of_words++;
}
}
message_length = number_of_words / size;
if (rank == MASTER) {
for (i = 0; i < number_of_words; i++)
printf("%s\n", words[i]);
}
MPI_Scatter(*words, message_length, MPI_CHAR, *local_words, message_length, MPI_CHAR, MASTER, MPI_COMM_WORLD);
printf("rank %d, fragment: \t%s\n", rank, *local_words);
MPI_Gather(*local_words, message_length, MPI_CHAR, *rec_words, message_length, MPI_CHAR, MASTER, MPI_COMM_WORLD);
if (rank == MASTER) {
printf("rank %d, gathered: \t%s\n", rank, *rec_words);
}
MPI_Finalize();
return EXIT_SUCCESS;
}
I expect the output:
iMac-di-iMac01:mpi macbook$ mpirun -n 2 main
Good
time
by
antonio
rank 0, fragment: Good time
rank 1, fragment: by antonio
rank 0, gathered: Good time by antonio
But the actual output is:
iMac-di-iMac01:mpi macbook$ mpirun -n 2 main
Good
time
by
antonio
rank 0, fragment: Go
rank 1, fragment: od
rank 0, gathered: Good
I realized that I never shared the solution to the problem, so here it is:
I created the matrix variable and sent that one with the scatter. This way the slaves received the words and not the characters:
int *matrix = 0;
matrix = malloc(sizeof (int) * n_words);
j = 1;
for (i = 0; i < n_words; i++) { /* < rather than <=, so we stay inside the n_words-element allocation */
matrix[i] = j;
j++;
}
n_words_cpu = n_words / (size);
procRow = malloc(sizeof (int) * n_words); // received row will contain p integers
MPI_Scatter(
/* send_data = */ matrix,
/* send_count = */ n_words_cpu,
/* send_datatype = */ MPI_INT,
/* recv_data = */ procRow,
/* recv_count = */ n_words_cpu,
/* recv_datatype = */ MPI_INT,
/* root = */ MASTER,
/* MPI_communicator = */ MPI_COMM_WORLD);
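For completeness: the reason the very first attempt split single characters is that words is an array of pointers, so *words is only the buffer of the first word and the rows are not contiguous in memory. If you do want to scatter the words themselves rather than integer indices, a minimal sketch (my own variation, not the solution above) is to keep all the words in one contiguous block and scatter whole rows:
/* Sketch: keep the words in one contiguous MAX_SIZE x CHARS block so
 * MPI_Scatter can split it row-wise. MAX_SIZE, CHARS, MASTER, size, rank and
 * number_of_words are the macros/variables from the question; error handling
 * is omitted. */
char (*words)[CHARS]       = calloc(MAX_SIZE, CHARS);
char (*local_words)[CHARS] = calloc(MAX_SIZE, CHARS);

/* ... MASTER fills words[0..number_of_words-1] exactly as before ... */

int message_length = number_of_words / size;      /* whole words per rank */
MPI_Scatter(words,       message_length * CHARS, MPI_CHAR,
            local_words, message_length * CHARS, MPI_CHAR,
            MASTER, MPI_COMM_WORLD);

for (int i = 0; i < message_length; i++)
    printf("rank %d, fragment: %s\n", rank, local_words[i]);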
I am trying to read a text file and count the occurrences of each letter from a to z, without distinguishing uppercase from lowercase. Other characters and spaces should be ignored. However, I cannot get the correct counts when I change the process count; the number of processes needs to be between 1 and 100. With 2 processes the counts are correct, but when I increase the number of processes the counts are wrong.
//FILE READING USING MPI FUNCTION
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char* argv[]) {
int size;
int rank;
int tag = 0;
int start;
int letterCounts[26];
MPI_Status status;
int chunksize;
MPI_Offset file_size;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if(size >= 101){
printf(" Process failed");
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, "warandpeace.txt", MPI_MODE_RDONLY, MPI_INFO_NULL, &file);
if (file == MPI_FILE_NULL) {
printf("Error opening file!\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
// find the file size
MPI_File_get_size(file, &file_size);
//printf("Process %d: filesize %d\n", rank,file_size);
// Allocate a buffer to hold the file contents
char* buffer = (char*)malloc(file_size * sizeof(char));
if(rank==0){
// Read the file into the buffer
MPI_File_read(file, buffer, file_size, MPI_CHAR, MPI_STATUS_IGNORE);
// Close the file
//MPI_File_close(&file);
//free(buffer);
//deviding the file
chunksize = file_size/(size - 1);
//printf("chunksize is %d\n",chunksize);
int end = chunksize;
for (int i = 1; i < size; i++) {
int start = 0;
MPI_Send(&buffer[start], end - start, MPI_CHAR, i, 0, MPI_COMM_WORLD);
start = end;
end += chunksize;
if (i == size - 2) {
end = file_size;
}
printf("destination rank %d: filesize%d: chunksize = %d\n", i,file_size, chunksize);
}
}
else{
// Receive the file size from process 0
//long file_size;
// Allocate memory for the chunks
//int chunksize = file_size / size;
char* buffer = (char*)malloc(file_size * sizeof(char));
// Receive the chunk of the file and count the letters
MPI_Recv(buffer, file_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
int count;
MPI_Get_count(&status, MPI_CHAR, &count);
printf("count%d: status = %d\n", count, status);
for (int j = 0; j < count; j++) {
char c = buffer[j];
if (c >= 'a' && c <= 'z') {
letterCounts[c - 'a']++;
}
else if (c >= 'A' && c <= 'Z') {
letterCounts[c - 'A']++;
}
}
}
// Reduce the counts from each process to get the total count
int totalCounts[26];
MPI_Reduce(letterCounts, totalCounts, 26, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
// Print the total counts
for (int i = 0; i < 26; i++) {
printf("%c: %d\n", 'a' + i, totalCounts[i]);
}
}
MPI_Finalize();
return 0;
}
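For comparison, here is a minimal, self-contained sketch of the same pattern in which every rank (including rank 0) counts a distinct, non-overlapping chunk and the per-rank counts are zero-initialized before the reduction. The even split by bytes, the hard-coded file name, and the plain point-to-point distribution are assumptions of this sketch, not a diagnosis of the program above:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int counts[26] = {0};                /* always start from zero */
    long chunk = 0;
    char *chunk_buf = NULL;

    if (rank == 0) {
        FILE *f = fopen("warandpeace.txt", "rb");   /* file name taken from the question */
        fseek(f, 0, SEEK_END);
        long fsize = ftell(f);
        fseek(f, 0, SEEK_SET);
        char *all = malloc(fsize);                  /* assumes the file fits in memory */
        fread(all, 1, fsize, f);
        fclose(f);

        long base = fsize / size;
        for (int r = 1; r < size; r++) {
            long start = r * base;                  /* distinct offset per rank */
            long len = (r == size - 1) ? fsize - start : base;
            MPI_Send(&len, 1, MPI_LONG, r, 0, MPI_COMM_WORLD);
            MPI_Send(all + start, (int)len, MPI_CHAR, r, 1, MPI_COMM_WORLD);
        }
        chunk = base;                               /* rank 0 keeps the first chunk */
        chunk_buf = all;
    } else {
        MPI_Recv(&chunk, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        chunk_buf = malloc(chunk);
        MPI_Recv(chunk_buf, (int)chunk, MPI_CHAR, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    for (long i = 0; i < chunk; i++) {
        char c = chunk_buf[i];
        if (c >= 'a' && c <= 'z') counts[c - 'a']++;
        else if (c >= 'A' && c <= 'Z') counts[c - 'A']++;
    }

    int total[26];
    MPI_Reduce(counts, total, 26, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0)
        for (int i = 0; i < 26; i++) printf("%c: %d\n", 'a' + i, total[i]);

    free(chunk_buf);
    MPI_Finalize();
    return 0;
}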
I have a message that I want to broadcast using MPI_Bcast.
I have two structs that contain, among other fields, dynamic arrays, and because of that I decided to use MPI_Pack and MPI_Unpack.
Below is my solution.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <stddef.h>
#define DEBUG 0
typedef struct Code {
char character;
int length;
char *code;
} Code;
typedef struct CodeDictionary {
int codesNr;
Code *codes;
} CodeDictionary;
typedef struct Header {
int size; // size of the message in bytes
MPI_Datatype *type; // message type
int position; // position in the buffer
} Header;
typedef unsigned char BYTE;
int getRand(const int from, const int to)
{
int num = (rand() % (to - from + 1)) + from;
return num;
}
void buildCodeDictionaryType(MPI_Datatype *CodeDictType) {
int blockLengths[] = {1, 1};
MPI_Datatype types[] = {MPI_CHAR, MPI_INT};
MPI_Aint offsets[2];
offsets[0] = offsetof(Code, character);
offsets[1] = offsetof(Code, length);
MPI_Type_create_struct(2, blockLengths, offsets, types, CodeDictType);
MPI_Type_commit(CodeDictType);
}
BYTE* buildCodeDictionaryMsg(Header *header, CodeDictionary *dict) {
header->size = sizeof(int);
BYTE *buffer = malloc(sizeof(BYTE) * (header->size));
MPI_Pack(&dict->codesNr, 1, MPI_INT, buffer, header->size, &header->position, MPI_COMM_WORLD);
if (DEBUG == 1) {
printf("\ndict->codesNr = %d\n", dict->codesNr);
printf("header->size = %d\n", header->size);
printf("header->position = %d\n\n", header->position);
}
for (int i = 0; i < dict->codesNr; i++) {
header->size += sizeof(char) + sizeof(int) + (sizeof(char) * (dict->codes[i].length+1));
buffer = realloc(buffer, header->size);
MPI_Pack(&dict->codes[i], 1, *header->type, buffer, header->size, &header->position, MPI_COMM_WORLD);
if (DEBUG == 1) {
printf("before pack array - header->size = %d\n", header->size);
printf("before pack array - header->position = %d\n", header->position);
}
MPI_Pack(dict->codes[i].code, (dict->codes[i].length+1), MPI_CHAR, buffer, header->size, &header->position, MPI_COMM_WORLD);
if (DEBUG == 1) {
printf("after pack array - header->size = %d\n", header->size);
printf("after pack array - header->position = %d\n", header->position);
printf("\n");
}
}
return buffer;
}
void buildCodeDictionary(Header *header, CodeDictionary *dict, BYTE* buffer) {
MPI_Unpack(buffer, header->size, &header->position, &dict->codesNr, 1, MPI_INT, MPI_COMM_WORLD);
if (DEBUG == 1) {
printf("dict->codesNr = %d\n", dict->codesNr);
printf("header->size = %d\n", header->size);
printf("header->position = %d\n\n", header->position);
}
dict->codes = malloc(sizeof(Code) * dict->codesNr);
// I do it just for the first element, as a test
MPI_Unpack(buffer, header->size, &header->position, &dict->codes[0], 1, *header->type, MPI_COMM_WORLD);
if (DEBUG == 1) {
printf("before unpack - header->size = %d\n", header->size);
printf("before unpack - header->position = %d\n", header->position);
}
dict->codes[0].code = malloc(sizeof(char) * (dict->codes[0].length+1));
MPI_Unpack(buffer, header->size, &header->position, &dict->codes[0].code, (dict->codes[0].length+1), MPI_CHAR, MPI_COMM_WORLD);
if (DEBUG == 1) {
printf("after unpack - header->size = %d\n", header->size);
printf("after unpack - header->position = %d\n", header->position);
}
// just for test
printf("character: %c\tlength: %d\tcode: ",
dict->codes[0].character,
dict->codes[0].length);
printf("%s\n", dict->codes[0].code); // it crashes here
// if it works do a for in order to unpack all the data
// ...
}
int main(int argc, char** argv) {
MPI_Init(NULL, NULL);
// Get the number of processes
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
// Get the rank of the process
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
// Create a CodeDictionary variable
CodeDictionary dict;
if (world_rank == 0)
printf("sending\n\n");
if (world_rank == 0) {
dict.codesNr = getRand(5, 9);
dict.codes = malloc(sizeof(Code) * dict.codesNr);
// create some fake values
for (int i = 0; i < dict.codesNr; i++) {
dict.codes[i].character = 'a' + i;
dict.codes[i].length = getRand(1, 9);
dict.codes[i].code = malloc(sizeof(char) * (dict.codes[i].length+1));
for (int j = 0; j < dict.codes[i].length; j++) {
int randChar = getRand('a', 'z');
dict.codes[i].code[j] = randChar + j;
}
dict.codes[i].code[dict.codes[i].length] = '\0';
if (DEBUG == 1)
printf("strlen(dict.charEncoding[%d].encoding): %d\n", i, strlen(dict.codes[i].code));
}
printf("source data\n");
for (int i = 0; i < dict.codesNr; i++) {
printf("codes[%d]:\n\t", i);
printf("character: %c\tlength: %d\tcode: ", dict.codes[i].character, dict.codes[i].length);
for (int j = 0; j < dict.codes[i].length; j++)
printf("%c", dict.codes[i].code[j]);
printf("\n");
}
}
MPI_Datatype codeDictType;
buildCodeDictionaryType(&codeDictType);
Header header = {.size = 0, .position = 0, .type = NULL};
header.type = &codeDictType;
BYTE *buffer = NULL;
if (world_rank == 0)
buffer = buildCodeDictionaryMsg(&header, &dict);
MPI_Bcast(&header.size, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (DEBUG == 1)
printf("rank %d: header.size = %d\n", world_rank, header.size);
if (world_rank != 0)
buffer = calloc(header.size, sizeof(BYTE));
MPI_Bcast(buffer, header.size, MPI_PACKED, 0, MPI_COMM_WORLD);
if (world_rank != 0) {
printf("\nreceiving\n\n");
buildCodeDictionary(&header, &dict, buffer);
// printf("received data\n");
// for (int i = 0; i < dict.codesNr; i++) {
// printf("codes[%d]:\n\t", i);
// printf("character: %c\tlength: %d\tcode: ", dict.codes[i].character, dict.codes[i].length);
// for (int j = 0; j < dict.codes[i].length; j++)
// printf("%c", dict.codes[i].code[j]);
// printf("\n");
// }
}
free(buffer);
MPI_Type_free(&codeDictType);
for (int i = 0; i < dict.codesNr; i++)
free(dict.codes[i].code);
free(dict.codes);
MPI_Finalize();
return 0;
}
For some reason this line of code doesn't work: MPI_Unpack(buffer, header->size, &header->position, &dict->codes[0].code, (dict->codes[0].length+1), MPI_CHAR, MPI_COMM_WORLD); if I try to print the received array, the run crashes with a segmentation fault.
I don't understand why this happens; I take care of the memory by allocating the right size, '\0' character included.
Do you know what the problem is?
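One detail that may be worth checking (an observation, not a verified fix): MPI_Unpack writes the unpacked bytes to the address passed as its output argument, so passing &dict->codes[0].code hands it the address of the pointer itself rather than the buffer the pointer refers to, and the pointer gets overwritten with character data. A sketch of the call without the extra &, using the same names as above:
/* Unpack into the buffer the pointer refers to, not into the pointer itself. */
dict->codes[0].code = malloc(sizeof(char) * (dict->codes[0].length + 1));
MPI_Unpack(buffer, header->size, &header->position,
           dict->codes[0].code,                 /* no & here */
           dict->codes[0].length + 1, MPI_CHAR, MPI_COMM_WORLD);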
I am building an example with a variable number of processes and binding them to sockets in a small network of machines with different architectures and numbers of CPUs.
I compile and run with:
mpiicpc avg_4.c -qopenmp -axSSE4.2,AVX,CORE-AVX2 -O3 -par-affinity=noverbose,granularity=core,compact -o b
mpiexec.hydra -machinefile f19 -genv I_MPI_PIN=1 -genv I_MPI_PIN_DOMAIN=socket -genv I_MPI_PIN_ORDER=compact -n 1 ./b
The machinefile f19 for the network (master ma + slave s19) is:
s19:1
ma:1
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sched.h>
#include <mpi.h>
int *create_mlu(int n_omp, int ws) {
int *mlu = (int *)calloc(n_omp * ws, sizeof(int));
for (int i = 0; i < ws; i++)
for (int j = 0; j < n_omp; j++)
mlu[j + i*n_omp] = j + 100 * i;
return mlu;
}
int *C4_Re(int *mal, int n_omp, int wr, int ws) {
int *rM8 = (int *)malloc(sizeof(int) * n_omp);
char nod[MPI_MAX_PROCESSOR_NAME];
int n_l; MPI_Get_processor_name(nod, &n_l);
#pragma omp parallel for
for (int i = 0; i < n_omp; i++) {
rM8[i] = mal[i] + 10 * omp_get_thread_num();
printf("ws%2d\t\tmpi%2d\t\tmaxTh%2d\t\tmaxPr%2d\t\tomp%2d\t\tcore%3d\t\trM8%4d\t\tnod %s\n", ws, wr, omp_get_num_threads(), omp_get_num_procs(), omp_get_thread_num(), sched_getcpu(), rM8[i], nod);
}
return rM8;
}
int main(void) {
MPI_Init(NULL, NULL);
int ts[2] = {7, 9}; //no of processes
for (int t = 0; t < 2; t++) {
int ws = ts[t];
int errcodes[ws];
MPI_Comm parentcomm, intercomm;
MPI_Comm_get_parent(&parentcomm);
if (parentcomm == MPI_COMM_NULL) {
MPI_Comm_spawn("./b", MPI_ARGV_NULL, ws, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm, errcodes);
//printf("I'm the parent.\n");
}
else {
int wr; MPI_Comm_rank(MPI_COMM_WORLD, &wr);// printf("wr %d\n", wr);
//int ps; MPI_Comm_size(parentcomm, &ps);// printf("ps %d\n", ps);
//int pr; MPI_Comm_rank(parentcomm, &pr);// printf("pr %d\n", pr);
int n_omp = 8, *mlu = NULL;
if (wr == 0) {
mlu = create_mlu(n_omp, ws);
//for (int i = 0; i < n_omp*ws; i++) printf("\tmlu[%2d] = %d\n", i, mlu[i]);
}
int *mal = (int *)malloc(n_omp * sizeof(int));
MPI_Scatter(mlu, n_omp, MPI_INT, mal, n_omp, MPI_INT, 0, MPI_COMM_WORLD);
//for (int i = 0; i < n_omp; i++) printf("\t\tmal[%2d] = %d\trank %d\n", i, mal[i], wr);
int *rM8 = NULL;
rM8 = C4_Re(mal, n_omp, wr, ws);
int *rS8 = NULL;
if (wr == 0)
rS8 = (int *)malloc(sizeof(int) * ws * n_omp);
MPI_Gather(rM8, n_omp, MPI_INT, rS8, n_omp, MPI_INT, 0, MPI_COMM_WORLD);
if (wr == 0) {
//for (int i = 0; i < n_omp * ws; i++) printf("\t\trS8[%2d] = %d\n", i, rS8[i]);
free(mlu);
free(rS8); }
free(mal);
free(rM8);
}
//fflush(stdout);
}
fflush(stdout);
MPI_Finalize();
return 0;
}
I have memory corruption somewhere that I need help finding.
Some results look like:
ws 7 rM8-37253944 nod ma mpi 7 maxTh 6 maxPr 6 omp 4 core 4
but they should look like:
ws 7 rM8 624 nod ma mpi 6 maxTh 6 maxPr 6 omp 2 core 2
Additional questions:
1 - Why is using parentcomm for Scatter and Gather not correct? In my opinion parentcomm is the new communicator (see the sketch after this list).
2 - Should I create different communicators for 7 and 9 processes?
3 - mpicc gives me wrong results and I don't know why.
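Regarding question 1, one general point about MPI that may be relevant (stated as standard behaviour, not as a diagnosis of this program): collective operations on an inter-communicator such as parentcomm use different root semantics. The process that supplies the data passes MPI_ROOT, the other processes in its group pass MPI_PROC_NULL, and the processes in the remote group pass the root's rank in the other group. A sketch of a parent-to-children scatter with those semantics, assuming the parent owned the mlu array:
/* Hypothetical parent-to-children scatter over the inter-communicator returned
 * by MPI_Comm_spawn / MPI_Comm_get_parent; buffers and counts reuse the names
 * from the code above for illustration only. */
if (parentcomm == MPI_COMM_NULL) {
    /* parent side: the single parent is the data source, so it passes MPI_ROOT
     * (other parent ranks, if any, would pass MPI_PROC_NULL) */
    MPI_Scatter(mlu, n_omp, MPI_INT, NULL, 0, MPI_INT, MPI_ROOT, intercomm);
} else {
    /* child side: the root argument is the parent's rank in the parent group */
    MPI_Scatter(NULL, 0, MPI_INT, mal, n_omp, MPI_INT, 0, parentcomm);
}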
I have the following data structure which I'm trying to send with MPI_Gather:
struct set {
int nbits;
char bits[];
};
The problem is that I'm unable to gather all items of the above structure, only the first one. The remaining items simply don't make sense.
Here is a testcase:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define SIZE 10
struct set {
int nbits;
char bits[];
};
int main(int argc, char *argv[]) {
int np, rank, i;
struct set *subsets, *single;
void *buf;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &np);
single = malloc(sizeof(struct set) + SIZE);
if(rank == 0) {
subsets = malloc((sizeof(struct set) + SIZE) * np);
}
buf = &subsets[0];
MPI_Datatype set_type, oldtypes[2];
int blockcounts[2];
MPI_Aint offsets[2];
MPI_Aint addr[3];
MPI_Get_address(single, &addr[0]);
MPI_Get_address(&single->nbits, &addr[1]);
MPI_Get_address(&single->bits, &addr[2]);
offsets[0] = addr[1] - addr[0];
oldtypes[0] = MPI_INT;
blockcounts[0] = 1;
offsets[1] = addr[2] - addr[0];
oldtypes[1] = MPI_CHAR;
blockcounts[1] = SIZE;
MPI_Type_create_struct(2, blockcounts, offsets, oldtypes, &set_type);
MPI_Type_commit(&set_type);
single->nbits = 2;
for(i=0; i<single->nbits; i++)
single->bits[i] = 'A' + rank;
MPI_Gather(single, 1, set_type, buf, 1, set_type, 0, MPI_COMM_WORLD);
if(rank == 0) {
void *ptr;
struct set *fs;
int size;
MPI_Type_size(set_type, &size);
ptr = buf;
for(i=0; i<np; i++) {
size_t j;
fs = ptr;
printf("from rank %d: bits => %p nbits => %d\n", i, fs->bits, fs->nbits);
for(j=0; j<2; j++)
printf("from rank %d: buf[%d] = %#x\n",
i, j, fs->bits[j]);
ptr += size;
}
}
MPI_Type_free(&set_type);
MPI_Finalize();
}
Any help would be appreciated.
The problem isn't so much in the MPI as in the pointer arithmetic with structs and with MPI types.
You have
void *ptr;
struct set *fs;
int size;
MPI_Type_size(set_type, &size);
ptr = buf;
for(i=0; i<np; i++) {
size_t j;
fs = ptr;
printf("from rank %d: bits => %p nbits => %d\n", i, fs->bits, fs->nbits);
for(j=0; j<2; j++)
printf("from rank %d: buf[%d] = %#x\n",
i, j, fs->bits[j]);
ptr += size;
}
}
But MPI_Type_size actually gives the amount of data in the type; if there's padding (which there probably will be here, to get the character array onto a word boundary), this isn't the same as sizeof. If you want to stay with MPI functions here, switching that one call to MPI_Type_extent, which tells you the entire extent spanned by the type, makes your code run for me... but there's still a problem.
If you take a look at the difference between sizeof(struct set)+SIZE and MPI_Type_extent() you'll see they're not the same; this:
#define SIZE 10
struct set {
    int nbits;
    char bits[];
};
...
malloc(sizeof(struct set)+SIZE);
isn't the same as
struct set {
    int nbits;
    char bits[SIZE];
};
malloc(sizeof(struct set));
because of padding, etc. This means that the size of subsets is wrong, and there's a memory error when you call MPI_Gather.
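To see the mismatch concretely, you can print both numbers next to each other (a quick check; MPI_Type_get_extent is the current name of the extent query):
/* Compare the C allocation size with the extent MPI computes for set_type. */
MPI_Aint lb, extent;
MPI_Type_get_extent(set_type, &lb, &extent);
printf("sizeof(struct set) + SIZE = %zu, extent of set_type = %ld\n",
       sizeof(struct set) + SIZE, (long)extent);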
You can get around this in a few different ways, but the simplest (and shortest in terms of line count) is to define the structure with the array already sized, and then use array indexing instead of pointer arithmetic:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define SIZE 10
struct set {
int nbits;
char bits[SIZE];
};
int main(int argc, char *argv[]) {
int np, rank, i;
struct set *subsets, *single;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &np);
single = malloc(sizeof(struct set));
if(rank == 0) {
subsets = malloc(sizeof(struct set) * np);
}
MPI_Datatype set_type, oldtypes[2];
int blockcounts[2];
MPI_Aint offsets[2];
MPI_Aint addr[3];
MPI_Get_address(single, &addr[0]);
MPI_Get_address(&single->nbits, &addr[1]);
MPI_Get_address(&single->bits, &addr[2]);
offsets[0] = addr[1] - addr[0];
oldtypes[0] = MPI_INT;
blockcounts[0] = 1;
offsets[1] = addr[2] - addr[0];
oldtypes[1] = MPI_CHAR;
blockcounts[1] = SIZE;
MPI_Type_create_struct(2, blockcounts, offsets, oldtypes, &set_type);
MPI_Type_commit(&set_type);
single->nbits = 2;
for(i=0; i<single->nbits; i++)
single->bits[i] = 'A' + rank;
MPI_Gather(single, 1, set_type, &(subsets[0]), 1, set_type, 0, MPI_COMM_WORLD);
if(rank == 0) {
for(i=0; i<np; i++) {
struct set *fs = &(subsets[i]);
printf("from rank %d: bits => %p nbits => %d\n", i, fs->bits, fs->nbits);
for(int j=0; j<2; j++)
printf("from rank %d: buf[%d] = %#x\n",
i, j, fs->bits[j]);
}
}
MPI_Type_free(&set_type);
MPI_Finalize();
}
Updated to add: if you can't do that, just change the size of the buffer you allocate to gather the data into:
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#define SIZE 10
struct set {
int nbits;
char bits[];
};
int main(int argc, char *argv[]) {
int np, rank, i;
struct set *single;
void *buf;
ptrdiff_t extent;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &np);
single = malloc(sizeof(struct set) + SIZE);
MPI_Datatype set_type, oldtypes[2];
int blockcounts[2];
MPI_Aint offsets[2];
MPI_Aint addr[3];
MPI_Get_address(single, &addr[0]);
MPI_Get_address(&single->nbits, &addr[1]);
MPI_Get_address(&single->bits, &addr[2]);
offsets[0] = addr[1] - addr[0];
oldtypes[0] = MPI_INT;
blockcounts[0] = 1;
offsets[1] = addr[2] - addr[0];
oldtypes[1] = MPI_CHAR;
blockcounts[1] = SIZE;
MPI_Type_create_struct(2, blockcounts, offsets, oldtypes, &set_type);
MPI_Type_commit(&set_type);
MPI_Type_extent(set_type, &extent);
buf = malloc((int)extent * np);
single->nbits = 2;
for(i=0; i<single->nbits; i++)
single->bits[i] = 'A' + rank;
MPI_Gather(single, 1, set_type, buf, 1, set_type, 0, MPI_COMM_WORLD);
if(rank == 0) {
struct set *fs = buf;
for(i=0; i<np; i++) {
printf("from rank %d: bits => %p nbits => %d\n", i, fs->bits, fs->nbits);
for(int j=0; j<2; j++)
printf("from rank %d: buf[%d] = %#x\n",
i, j, fs->bits[j]);
fs = (struct set *)((char *)fs + extent);
}
}
MPI_Type_free(&set_type);
MPI_Finalize();
}
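One caveat if you try the second version with a recent MPI library: MPI_Type_extent was deprecated and removed in MPI-3.0, so the extent query would be spelled like this instead:
/* MPI-3 spelling of the extent query used above. */
MPI_Aint lb, extent;
MPI_Type_get_extent(set_type, &lb, &extent);
buf = malloc(extent * np);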
I am a newbie to C and MPI.
I have the following code which I am using with MPI.
#include "RabinKarp.c"
#include <stdio.h>
#include <stdlib.h>
#include<string.h>
#include<math.h>
#include </usr/include/mpi/mpi.h>
typedef struct {
int lowerOffset;
int upperOffset;
int processorNumber;
} patternPartitioning;
int rank;
FILE *fp;
char* filename = "/home/rohit/Downloads/10_seqs_2000_3000_bp.fasta";
int n = 0;
int d = 0;
//number of processors
int k, i = 0, lower_limit, upper_limit;
int main(int argc, char** argv) {
char* pattern= "taaat";
patternPartitioning partition[k];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &k);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
fp = fopen(filename, "rb");
if (fp != '\0') {
fseek(fp, 0L, SEEK_END);
n = ftell(fp);
fseek(fp, 0L, SEEK_SET);
}
//Do for Master Processor
if(rank ==0){
int m = strlen(pattern);
printf("pattern length is %d \n", m);
d = (int)(n - m + 1) / k;
for (i = 0; i <= k - 2; i++) {
lower_limit = round(i * d);
upper_limit = round((i + 1) * d) + m - 1;
partition->lowerOffset = lower_limit;
partition->upperOffset = upper_limit;
partition->processorNumber = i+1;
// k-2 times calculate the limits like this
printf(" the lower limit is %d and upper limit is%d\n",
partition->lowerOffset, partition->upperOffset);
int mpi_send_block[2];
mpi_send_block[0]= lower_limit;
mpi_send_block[1] = upper_limit;
MPI_Send(mpi_send_block, 2, MPI_INT, i+1, i+1, MPI_COMM_WORLD);
//int MPI_Send(void *buf, int count, MPI_Datatype dtype, int dest, int tag, MPI_Comm comm);
}
// for the last processor calculate the index here
lower_limit = round((k - 1) * d);
upper_limit = n;
partition->lowerOffset = lower_limit;
partition->upperOffset = n;
partition->processorNumber = k;
printf("Processor : %d : has start : %d : and end : %d :\n",rank,partition->lowerOffset,partition->upperOffset);
//perform the search here
int size = partition->upperOffset-partition->lowerOffset;
char *text = (char*) malloc (size);
fseek(fp,partition->lowerOffset , SEEK_SET);
fread(&text, sizeof(char), size, fp);
printf("read in rank0");
fputs(text,stdout);
int number =0;
fputs(text,stdout);
fputs(pattern,stdout);
number = rabincarp(text,pattern);
for (i = 0; i <= k - 2; i++) {
int res[1];
res[0]=0;
MPI_Status status;
// MPI_Recv(res, 1, MPI_INT, i+1, i+1, MPI_COMM_WORLD, &status);
// number = number + res[0];
}
printf("\n\ntotal number of result found:%d\n", number);
} else {
patternPartitioning mypartition;
MPI_Status status;
int number[1];
int mpi_recv_block[2];
MPI_Recv(mpi_recv_block, 2, MPI_INT, 0, rank, MPI_COMM_WORLD,
&status);
printf("Processor : %d : has start : %d : and end : %d :\n",rank,mpi_recv_block[0],mpi_recv_block[1]);
//perform the search here
int size = mpi_recv_block[1]-mpi_recv_block[0];
char *text = (char*) malloc (size);
fseek(fp,mpi_recv_block[0] , SEEK_SET);
fread(&text, sizeof(char), size, fp);
printf("read in rank1");
// fread(text,size,size,fp);
printf("length of text segment by proc: %d is %d",rank,(int)strlen(text));
number[0] = rabincarp(text,pattern);
//MPI_Send(number, 1, MPI_INT, 0, rank, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
fclose(fp);
return (EXIT_SUCCESS);
}
If I run this with mpirun -np 2 pnew, I get the following error:
[localhost:03265] *** Process received signal ***
[localhost:03265] *** Process received signal ***
--------------------------------------------------------------------------
mpirun noticed that process rank 1 with PID 3265 on node localhost exited on signal 7 (Bus error).
If I remove the fread() statements I don't get the error. Can anyone tell me what I am missing?
char *text = (char*) malloc (size);
fseek(fp,partition->lowerOffset , SEEK_SET);
fread(&text, sizeof(char), size, fp);
The documentation for fread says "The function fread() reads nmemb elements of data, each size bytes long, from the stream pointed to by stream, storing them at the location given by ptr."
Since text is a char *, &text is the address of a char *. That won't have enough space to hold the data you're reading. You want to pass fread the address of the memory you allocated, not the address of the variable holding that address! (So remove the &.)
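In other words, something along these lines (same variable names as above; the extra byte and the terminator are my additions, only because the surrounding code later treats text as a C string with strlen and fputs):
/* Read into the allocated buffer itself and NUL-terminate it afterwards. */
char *text = malloc(size + 1);
fseek(fp, partition->lowerOffset, SEEK_SET);
size_t got = fread(text, sizeof(char), size, fp);
text[got] = '\0';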
if (fp != '\0') {
fp is a FILE *, while '\0' is an int constant.
This is not the error, but I suggest you compile at a higher warning level to catch this kind of mistake.
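The conventional way to write that check is against NULL, for example:
/* fopen returns NULL on failure, so test the pointer against NULL. */
fp = fopen(filename, "rb");
if (fp == NULL) {
    perror("fopen");
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}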