fread() in MPI is giving Signal 7 Bus Error - c

I am a newbie to C and MPI.
I have the following code which I am using with MPI.
#include "RabinKarp.c"
#include <stdio.h>
#include <stdlib.h>
#include<string.h>
#include<math.h>
#include </usr/include/mpi/mpi.h>
typedef struct {
int lowerOffset;
int upperOffset;
int processorNumber;
} patternPartitioning;
int rank;
FILE *fp;
char* filename = "/home/rohit/Downloads/10_seqs_2000_3000_bp.fasta";
int n = 0;
int d = 0;
//number of processors
int k, i = 0, lower_limit, upper_limit;
int main(int argc, char** argv) {
char* pattern= "taaat";
patternPartitioning partition[k];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &k);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
fp = fopen(filename, "rb");
if (fp != '\0') {
fseek(fp, 0L, SEEK_END);
n = ftell(fp);
fseek(fp, 0L, SEEK_SET);
}
//Do for Master Processor
if(rank ==0){
int m = strlen(pattern);
printf("pattern length is %d \n", m);
d = (int)(n - m + 1) / k;
for (i = 0; i <= k - 2; i++) {
lower_limit = round(i * d);
upper_limit = round((i + 1) * d) + m - 1;
partition->lowerOffset = lower_limit;
partition->upperOffset = upper_limit;
partition->processorNumber = i+1;
// k-2 times calculate the limits like this
printf(" the lower limit is %d and upper limit is%d\n",
partition->lowerOffset, partition->upperOffset);
int mpi_send_block[2];
mpi_send_block[0]= lower_limit;
mpi_send_block[1] = upper_limit;
MPI_Send(mpi_send_block, 2, MPI_INT, i+1, i+1, MPI_COMM_WORLD);
//int MPI_Send(void *buf, int count, MPI_Datatype dtype, int dest, int tag, MPI_Comm comm);
}
// for the last processor calculate the index here
lower_limit = round((k - 1) * d);
upper_limit = n;
partition->lowerOffset = lower_limit;
partition->upperOffset = n;
partition->processorNumber = k;
printf("Processor : %d : has start : %d : and end : %d :\n",rank,partition->lowerOffset,partition->upperOffset);
//perform the search here
int size = partition->upperOffset-partition->lowerOffset;
char *text = (char*) malloc (size);
fseek(fp,partition->lowerOffset , SEEK_SET);
fread(&text, sizeof(char), size, fp);
printf("read in rank0");
fputs(text,stdout);
int number =0;
fputs(text,stdout);
fputs(pattern,stdout);
number = rabincarp(text,pattern);
for (i = 0; i <= k - 2; i++) {
int res[1];
res[0]=0;
MPI_Status status;
// MPI_Recv(res, 1, MPI_INT, i+1, i+1, MPI_COMM_WORLD, &status);
// number = number + res[0];
}
printf("\n\ntotal number of result found:%d\n", number);
} else {
patternPartitioning mypartition;
MPI_Status status;
int number[1];
int mpi_recv_block[2];
MPI_Recv(mpi_recv_block, 2, MPI_INT, 0, rank, MPI_COMM_WORLD,
&status);
printf("Processor : %d : has start : %d : and end : %d :\n",rank,mpi_recv_block[0],mpi_recv_block[1]);
//perform the search here
int size = mpi_recv_block[1]-mpi_recv_block[0];
char *text = (char*) malloc (size);
fseek(fp,mpi_recv_block[0] , SEEK_SET);
fread(&text, sizeof(char), size, fp);
printf("read in rank1");
// fread(text,size,size,fp);
printf("length of text segment by proc: %d is %d",rank,(int)strlen(text));
number[0] = rabincarp(text,pattern);
//MPI_Send(number, 1, MPI_INT, 0, rank, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
fclose(fp);
return (EXIT_SUCCESS);
}
If I run this (mpirun -np 2 pnew) I get the following error:
[localhost:03265] *** Process received signal ***
[localhost:03265] *** Process received signal ***
--------------------------------------------------------------------------
mpirun noticed that process rank 1 with PID 3265 on node localhost exited on signal 7 (Bus error).
If I remove the fread() statements I don't get the error. Can anyone tell me what I am missing?

char *text = (char*) malloc (size);
fseek(fp,partition->lowerOffset , SEEK_SET);
fread(&text, sizeof(char), size, fp);
The documentation for fread says "The function fread() reads nmemb elements of data, each size bytes long, from the stream pointed to by stream, storing them at the location given by ptr."
Since text is a char *, &text is the address of a char *. That won't have enough space to hold the data you're reading. You want to pass fread the address of the memory you allocated, not the address of the variable holding that address! (So remove the &.)
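For example, a minimal corrected version of that read might look like the sketch below (the extra byte and the terminating '\0' are an addition on my part, because the code later calls strlen() and fputs() on text, which need a NUL-terminated string):

int size = partition->upperOffset - partition->lowerOffset;
char *text = malloc(size + 1);                 /* one extra byte for the '\0' */
if (text == NULL) {
    /* handle the allocation failure, e.g. abort */
}
fseek(fp, partition->lowerOffset, SEEK_SET);
size_t got = fread(text, sizeof(char), size, fp);   /* pass text, not &text */
text[got] = '\0';                              /* terminate what was actually read */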

if (fp != '\0') {
fp is a FILE*, while '\0' is an int constant.
This is not the error, but I suggest you compile with a higher warning level to catch this kind of mistake.
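If you want an explicit check, compare against NULL instead; a minimal sketch (aborting is just one possible way to react in an MPI program):

fp = fopen(filename, "rb");
if (fp == NULL) {
    perror("fopen");                  /* report why the open failed */
    MPI_Abort(MPI_COMM_WORLD, 1);     /* don't continue without the input file */
}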

Related

Read txt file each letter count

I tried to read a txt file and count each letter from a to z, without distinguishing uppercase and lowercase; other characters and spaces need to be ignored. But I cannot get the correct count when I change the process count. The number of processes needs to be between 1 and 100. With 2 processes it shows the correct count, but when I increase the number it shows a wrong count.
//FILE READING USING MPI FUNCTION
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char* argv[]) {
int size;
int rank;
int tag = 0;
int start;
int letterCounts[26];
MPI_Status status;
int chunksize;
MPI_Offset file_size;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if(size >= 101){
printf(" Process failed");
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_File file;
MPI_File_open(MPI_COMM_WORLD, "warandpeace.txt", MPI_MODE_RDONLY, MPI_INFO_NULL, &file);
if (file == MPI_FILE_NULL) {
printf("Error opening file!\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
// find the file size
MPI_File_get_size(file, &file_size);
//printf("Process %d: filesize %d\n", rank,file_size);
// Allocate a buffer to hold the file contents
char* buffer = (char*)malloc(file_size * sizeof(char));
if(rank==0){
// Read the file into the buffer
MPI_File_read(file, buffer, file_size, MPI_CHAR, MPI_STATUS_IGNORE);
// Close the file
//MPI_File_close(&file);
//free(buffer);
//deviding the file
chunksize = file_size/(size - 1);
//printf("chunksize is %d\n",chunksize);
int end = chunksize;
for (int i = 1; i < size; i++) {
int start = 0;
MPI_Send(&buffer[start], end - start, MPI_CHAR, i, 0, MPI_COMM_WORLD);
start = end;
end += chunksize;
if (i == size - 2) {
end = file_size;
}
printf("destination rank %d: filesize%d: chunksize = %d\n", i,file_size, chunksize);
}
}
else{
// Receive the file size from process 0
//long file_size;
// Allocate memory for the chunks
//int chunksize = file_size / size;
char* buffer = (char*)malloc(file_size * sizeof(char));
// Receive the chunk of the file and count the letters
MPI_Recv(buffer, file_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
int count;
MPI_Get_count(&status, MPI_CHAR, &count);
printf("count%d: status = %d\n", count, status);
for (int j = 0; j < count; j++) {
char c = buffer[j];
if (c >= 'a' && c <= 'z') {
letterCounts[c - 'a']++;
}
else if (c >= 'A' && c <= 'Z') {
letterCounts[c - 'A']++;
}
}
}
// Reduce the counts from each process to get the total count
int totalCounts[26];
MPI_Reduce(letterCounts, totalCounts, 26, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
// Print the total counts
for (int i = 0; i < 26; i++) {
printf("%c: %d\n", 'a' + i, totalCounts[i]);
}
}
MPI_Finalize();
return 0;
}

How to share a string array using open mpi

I'm new to OpenMPI and I don't know how to use scatter and gather to send an array of strings to all processors. I would like to divide an array and send the pieces to each processor, but all I can divide are the characters of a single array element. Can anyone help me, please?
Here is my code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"
#define MASTER 0
#define BUF_SIZE 2048
#define CHAR_SIZE 900
#define CHARS 13
#define MAX_SIZE 3500
#define NUMBER_OF_FILES 2
int main(int argc, char** argv) {
int number_of_words = 0;
int total_rows = 0;
int i, j = 0;
char **words = (char**) calloc(MAX_SIZE, sizeof (char*));
for (i = 0; i < MAX_SIZE; i++) {
words[i] = (char*) calloc(CHARS, sizeof (char));
}
char **local_words = (char**) calloc(MAX_SIZE, sizeof (char*));
for (i = 0; i < MAX_SIZE; i++) {
local_words[i] = (char*) calloc(CHARS, sizeof (char));
}
char **rec_words = (char**) calloc(MAX_SIZE, sizeof (char*));
for (i = 0; i < MAX_SIZE; i++) {
rec_words[i] = (char*) calloc(CHARS, sizeof (char));
}
char str_righe[BUF_SIZE][CHAR_SIZE];
FILE *f = NULL;
char f_title[10];
char str_nfiles[10];
char delim[10] = {10, 32, 33, 39, 44, 46, 58, 59, 63};
char *ptr;
int rank;
int size;
int message_length;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 1; i <= NUMBER_OF_FILES; i++) {
strcpy(f_title, "f");
sprintf(str_nfiles, "%d", i);
strcat(f_title, str_nfiles);
strcat(f_title, ".txt");
f = fopen(f_title, "r");
while (fgets(str_righe[j], BUF_SIZE, f)) {
str_righe[j][strlen(str_righe[j])] = '\0';
j++;
}
fclose(f);
}
total_rows = j;
for (i = 0; i < total_rows; ++i) {
ptr = strtok(str_righe[i], delim);
while (ptr != NULL) {
strcpy(words[number_of_words], ptr);
ptr = strtok(NULL, delim);
number_of_words++;
}
}
message_length = number_of_words / size;
if (rank == MASTER) {
for (i = 0; i < number_of_words; i++)
printf("%s\n", words[i]);
}
MPI_Scatter(*words, message_length, MPI_CHAR, *local_words, message_length, MPI_CHAR, MASTER, MPI_COMM_WORLD);
printf("rank %d, fragment: \t%s\n", rank, *local_words);
MPI_Gather(*local_words, message_length, MPI_CHAR, *rec_words, message_length, MPI_CHAR, MASTER, MPI_COMM_WORLD);
if (rank == MASTER) {
printf("rank %d, gathered: \t%s\n", rank, *rec_words);
}
MPI_Finalize();
return EXIT_SUCCESS;
}
I expect the output:
iMac-di-iMac01:mpi macbook$ mpirun -n 2 main
Good
time
by
antonio
rank 0, fragment: Good time
rank 1, fragment: by antonio
rank 0, gathered: Good time by antonio
But the actual output is:
iMac-di-iMac01:mpi macbook$ mpirun -n 2 main
Good
time
by
antonio
rank 0, fragment: Go
rank 1, fragment: od
rank 0, gathered: Good
I realized that I never shared the solution to the problem, so here it is:
I created the matrix variable and sent that with the scatter. In this way the slaves received the words and not the characters:
int *matrix = 0;
matrix = malloc(sizeof (int) * n_words);
j = 1;
for (i = 0; i <= n_words; i++) {
matrix[i] = j;
j++;
}
n_words_cpu = n_words / (size);
procRow = malloc(sizeof (int) * n_words); // received row will contain p integers
MPI_Scatter(
/* send_data = */ matrix,
/* send_count = */ n_words_cpu,
/* send_datatype = */ MPI_INT,
/* recv_data = */ procRow,
/* recv_count = */ n_words_cpu,
/* recv_datatype = */ MPI_INT,
/* root = */ MASTER,
/* MPI_commuicator = */ MPI_COMM_WORLD);

MPI Search In Array

I'm trying to find a specific value inside an array, searching in parallel with MPI. When my code finds the value, it shows an error.
ERROR
Assertion failed in file src/mpid/ch3/src/ch3u_buffer.c at line 77: FALSE
memcpy argument memory ranges overlap, dst_=0x7ffece7eb590 src_=0x7ffece7eb590 len_=4
PROGRAM
const char *FILENAME = "input.txt";
const size_t ARRAY_SIZE = 640;
int main(int argc, char **argv)
{
int *array = malloc(sizeof(int) * ARRAY_SIZE);
int rank,size;
MPI_Status status;
MPI_Request request;
int done,myfound,inrange,nvalues;
int i,j,dummy;
/* Let the system do what it needs to start up MPI */
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm_size(MPI_COMM_WORLD,&size);
myfound=0;
if (rank == 0)
{
createFile();
array = readFile(FILENAME);
}
MPI_Bcast(array, ARRAY_SIZE, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Irecv(&dummy, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &request);
MPI_Test(&request, &done, &status);
nvalues = ARRAY_SIZE / size; //EACH PROCESS RUNS THAT MUCH NUMBER IN ARRAY
i = rank * nvalues; //OFFSET FOR EACH PROCESS INSIDE THE ARRAY
inrange = (i <= ((rank + 1) * nvalues - 1) && i >= rank * nvalues); //LIMIT OF THE OFFSET
while (!done && inrange)
{
if (array[i] == 17)
{
dummy = 1;
for (j = 0; j < size; j++)
{
MPI_Send(&dummy, 1, MPI_INT, j, 1, MPI_COMM_WORLD);
}
printf("P:%d found it at global index %d\n", rank, i);
myfound = 1;
}
printf("P:%d - %d - %d\n", rank, i, array[i]);
MPI_Test(&request, &done, &status);
++i;
inrange = (i <= ((rank + 1) * nvalues - 1) && i >= rank * nvalues);
}
if (!myfound)
{
printf("P:%d stopped at global index %d\n", rank, i - 1);
}
MPI_Finalize();
}
The error is somewhere in here, because when I put an invalid number (for example -5) into the if condition, the program runs smoothly.
dummy = 1;
for (j = 0; j < size; j++)
{
MPI_Send(&dummy, 1, MPI_INT, j, 1, MPI_COMM_WORLD);
}
printf("P:%d found it at global index %d\n", rank, i);
myfound = 1;
Thanks
Your program is invalid with respect to the MPI standard because you use the same buffer (&dummy) for both MPI_Irecv() and MPI_Send().
You can either use two distinct buffers (e.g. dummy_send and dummy_recv), or, since you do not seem to care about the value of dummy, use NULL as the buffer and send/receive zero-size messages.
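A minimal sketch of the first option, using two separate buffers (the names dummy_send and dummy_recv are just placeholders, not from the original code):

int dummy_send = 1, dummy_recv;
MPI_Irecv(&dummy_recv, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &request);
/* ... unchanged search loop ... */
if (array[i] == 17)
{
    for (j = 0; j < size; j++)
    {
        /* the send buffer is now distinct from the one MPI_Irecv is filling */
        MPI_Send(&dummy_send, 1, MPI_INT, j, 1, MPI_COMM_WORLD);
    }
    printf("P:%d found it at global index %d\n", rank, i);
    myfound = 1;
}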

Make slaves wait for MPI_Bcast from master

I'm trying to write a parallel program that implements a pipeline version of Gaussian elimination, using MPI and C language...
However I'm encountering some difficulties early in the implementation of the code....
I use a root process to read a data matrix from a text file... this process gives me the size of the matrix, and I broadcast that size to all other processes so that they can allocate it in memory... However, the slave processes try to allocate it before the broadcast from the root...
How can I make them wait?
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
int CalcInd(int i, int j, int dimL)
{
return i*dimL +j;
}
int main (int argc, char **argv)
{
FILE *fin, *fout;
char fA[] = "Matrix.txt";
int rank, size, i, ii, j, k, m, n, picked, tmp, total;
int counter=0, elements=0;
int * RightNeigbhor, * LeftNeigbhor, * loc;
float f, magnitude, t;
float * A, * x;
MPI_Status status;
MPI_Request request;
// MPI initialization
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Barrier(MPI_COMM_WORLD);
if(rank == 0)
{
// Defenição dos processos vizinhos pelo master
RightNeigbhor = (int *)calloc(size,sizeof(int));
if(RightNeigbhor==NULL)
{printf("!!! Could not allocate memory !!!\n"); exit(-1);}
LeftNeigbhor = (int *)calloc(size,sizeof(int));
if(RightNeigbhor==NULL)
{printf("!!! Could not allocate memory !!!\n"); exit(-1);}
for(i = 0; i < size; i++ )
{
RightNeigbhor[i] = (rank + 1) % size;
LeftNeigbhor[i] = (rank - 1) % size;
}
// Broadcast os processos vizinhos para todos os processos
MPI_Bcast ( RightNeigbhor, size, MPI_INTEGER, rank, MPI_COMM_WORLD );
MPI_Bcast ( LeftNeigbhor, size, MPI_INTEGER, rank, MPI_COMM_WORLD );
// Leitura da matriz A pelo master
fin = fopen ( fA, "r" );
if (fin == NULL){ printf("!!! FILE NOT FOUND !!!"); exit(-1); }
while( !feof(fin))
{
fscanf (fin, "%f", &f);
elements++;
}
rewind(fin);
f = 0;
while( !feof(fin))
{
if(fgetc(fin) == '\n')
{
counter++;
}
}
rewind(fin);
n = counter;
m = (elements-1) / counter;
total = n*m;
MPI_Bcast ( &total, 1, MPI_INT, rank, MPI_COMM_WORLD );
MPI_Bcast ( &n, 1, MPI_INT, rank, MPI_COMM_WORLD );
}
// Alocação de variaveis
A = (float *)calloc(total,sizeof(float));
if(A==NULL){printf("!!! Could not allocate memory !!!\n"); exit(-1);}
loc = (int *)calloc(n,sizeof(int*));
if(loc==NULL){printf("!!! Could not allocate memory !!!\n"); exit(-1);}
// AND IT GOES ON AND ON
Everything in your rank == 0 block runs only in process 0, while processes with rank 1 ... n-1 simply skip that block. Therefore, you have to place your MPI_Bcast calls in code that is executed by every process in the communicator (here MPI_COMM_WORLD). When processes 1 ... n-1 skip all the initialization and reach the broadcast before process 0 does, they will wait there until the bcast has occurred.
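In other words, something along these lines (a sketch: only the broadcasts of the sizes are shown, the file reading stays inside the rank == 0 block, and the root argument is the literal 0 so that every rank passes the same root):

if(rank == 0)
{
    /* ... read the file, compute n, m and total (only rank 0 does this) ... */
}
/* executed by every rank, so every rank takes part in the broadcast;
   ranks 1..size-1 block here until rank 0 arrives with the data */
MPI_Bcast(&total, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
/* only now does every rank know the sizes, so allocate after the broadcasts */
A = (float *)calloc(total, sizeof(float));
if(A == NULL){printf("!!! Could not allocate memory !!!\n"); exit(-1);}
loc = (int *)calloc(n, sizeof(int));
if(loc == NULL){printf("!!! Could not allocate memory !!!\n"); exit(-1);}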

C MPI array passing

Why can't I access muhray 8 lines from the bottom? The print lines that start with "!!" work correctly but I can't seem to get the right values at the very end.
Here is my output:
[computer#node01 ~]$ mpiexec -n 8 ./presum 1000
!! proc0's array is size 125 and goes from 1 to 1
proc0's array is size 125 and goes from 4693173 to 1819307369
!! proc2's array is size 125 and goes from 1 to 1
proc2's array is size 125 and goes from 4693173 to 1819307369
!! proc3's array is size 125 and goes from 1 to 1
proc3's array is size 125 and goes from 4693173 to 1819307369
!! proc1's array is size 125 and goes from 1 to 1
proc1's array is size 125 and goes from 4693173 to 1819307369
!! proc4's array is size 125 and goes from 1 to 1
proc4's array is size 125 and goes from 4693173 to 1819307369
!! proc5's array is size 125 and goes from 1 to 1
proc5's array is size 125 and goes from 4693173 to 1819307369
!! proc6's array is size 125 and goes from 1 to 1
proc6's array is size 125 and goes from 4693173 to 1819307369
!! proc7's array is size 125 and goes from 1 to 1
proc7's array is size 125 and goes from 4693173 to 1819307369
Here is the code in question:
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//max size of the data array to split up
#define MAXSIZE 1000000
//methods
int checkInput(int nprocs, int argc, char *argv[], int id);
//mpi send & rec tags
int ARSIZE = 0; //array size
int ARR = 1; //array
int MSM = 2; //slave sum
int main(int argc, char *argv[]) {
int ARsize; /*size of the array to pre-sum*/
int id; /*process id number*/
int nprocs; /*number of processors*/
int i, j, k; /*counters*/
int muhsize; /*size of personal array to calculate*/
int * muhray; /**/
//MPI framework
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
MPI_Barrier(MPI_COMM_WORLD);
//pull input, check values, return ARsize
ARsize = checkInput(nprocs, argc, argv, id);
//set up array, serial run, send out chunks
if (!id) {
//variables only the zero node needs
int data[ARsize]; /*full original array of numbers*/
int chunkSize, upper, lower; /*vars to determine cunksize to send out*/
int smoothCount = 0; /*BOOL for uneven division chunksize*/
//fill array with numbers
for (i = 0; i < ARsize; i++) {
data[i] = 1;
}
//sequential solution here
//determine chunkSize
chunkSize = (int) (ARsize/nprocs);
if (ARsize % nprocs != 0) {
chunkSize = chunkSize + 1;
smoothCount = 1;
}
//send chunks of data to procs
for (i = 0; i < nprocs; i++) {
lower = i * chunkSize;
upper = ((i+1) * chunkSize) - 1;
if (i == nprocs-1 && smoothCount == 1) {
upper = ARsize-1;
}
int intarray[(upper-lower)];
for (k = lower, j = 0; k <= upper; k++, j++) {
intarray[j] = data[k];
}
if(i > 0) {
//send array size
MPI_Send(&j, 1, MPI_INT, i, ARSIZE, MPI_COMM_WORLD);
//send actual array
MPI_Send(intarray, j, MPI_INT, i, ARR, MPI_COMM_WORLD);
}
//zero no send to self, this data used later for all nodes calc
else {
muhsize = j;
int muhray[muhsize];
for (j = 0; j <= chunkSize; j++) {
muhray[j] = intarray[j];
}
printf("!! proc%d's array is size %d and goes from %d to %d\n", id, muhsize, muhray[0], muhray[(muhsize-1)]);
}
}
}
else {
MPI_Recv(&muhsize, 1, MPI_INT, 0, ARSIZE, MPI_COMM_WORLD, &status);
int muhray[muhsize];
MPI_Recv(muhray, muhsize, MPI_INT, 0, ARR, MPI_COMM_WORLD, &status);
printf("!! proc%d's array is size %d and goes from %d to %d\n", id, muhsize, muhray[0], muhray[(muhsize-1)]);
fflush(stdout);
}
printf("proc%d's array is size %d and goes from %d to %d\n", id, muhsize, muhray[0], muhray[muhsize]);
fflush(stdout);
//MPI_Send(&muhsize, 1, MPI_INT, 0, MSM, MPI_COMM_WORLD);
MPI_Finalize();
}
//pull input, check values, return ARsize
int checkInput(int nprocs, int argc, char *argv[], int id) {
int size;
if (nprocs % 2 != 0 || nprocs == 6 || nprocs > 8) {
if (!id) printf("run with 2^k procs, (1 >= k <= 3)\n");
fflush(stdout);
MPI_Finalize();
exit(1);
}
if (argc != 2) {
if (!id) printf("Usage: presum [array size (max: %d)]\n", MAXSIZE);
fflush(stdout);
MPI_Finalize();
exit(1);
}
size = atoi(argv[1]);
if (size <= nprocs) {
if (!id) printf("search range must be greater than processor count\n");
fflush(stdout);
MPI_Finalize();
exit(1);
}
if (size > MAXSIZE) {
if (!id) printf("array size must be less than or equal to %d\n", MAXSIZE);
fflush(stdout);
MPI_Finalize();
exit(1);
}
return size;
}
The problem you have is very likely with the scope of your variables. For instance, here:
...
else {
MPI_Recv(&muhsize, 1, MPI_INT, 0, ARSIZE, MPI_COMM_WORLD, &status);
int muhray[muhsize];
MPI_Recv(muhray, muhsize, MPI_INT, 0, ARR, MPI_COMM_WORLD, &status);
printf("!! proc%d's array is size %d and goes from %d to %d\n", id, muhsize, muhray[0], muhray[(muhsize-1)]);
fflush(stdout);
}
printf("proc%d's array is size %d and goes from %d to %d\n", id, muhsize, muhray[0], muhray[muhsize]);
you declare int muhray[muhsize]; inside the scope of the else construct. When you exit this scope, muhray is destroyed, as it is a local variable. What you are using in the last printf is the uninitialized int * muhray; declared at the start of main.
Note that while they have the same name, these two are different variables.
It seems that you're printing "muhray[(muhsize-1)]" in the first two printfs and "muhray[muhsize]" at the end. You should always print the value of "muhray[(muhsize-1)]"; muhray[muhsize] is one element past the end of the array.
The else-part with the two MPI_Recv calls uses a locally defined variable called "muhray", which is different from the one defined initially in the main function (it actually shadows the muhray declared at the beginning of main). Thus the "printf("!!..." line uses a completely different variable than the last "printf("proc..." call.
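One possible fix (a sketch): keep the int * muhray declared at the top of main and allocate into that pointer in both branches, so the buffer outlives the if/else and the final printf indexes the last element instead of one past the end:

else {
    MPI_Recv(&muhsize, 1, MPI_INT, 0, ARSIZE, MPI_COMM_WORLD, &status);
    muhray = malloc(muhsize * sizeof(int));    /* no new declaration: use the outer pointer */
    MPI_Recv(muhray, muhsize, MPI_INT, 0, ARR, MPI_COMM_WORLD, &status);
}
printf("proc%d's array is size %d and goes from %d to %d\n",
       id, muhsize, muhray[0], muhray[muhsize - 1]);   /* last element, not muhray[muhsize] */

The rank-0 branch would need the same treatment: malloc into the outer muhray instead of declaring a new local array there.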
