2

I am working on some code and ran into a problem implementing the matrix size for matrix multiplication from console input.

The first version works with:

 const int size = 1000;
 int mat_a[size][size], mat_b[size][size], mat_c[size][size];

To use console arguments, I found that I need to implement dynamic array allocation. Unfortunately, I ran into a problem:

 *** Process received signal ***
 Signal: Segmentation fault (11)
 Signal code: Address not mapped (1)
 Failing at address: 0x7ffd955237f8

I suspect the problem may be in the MPI functions Bcast, Scatter and Gather. I searched for similar cases on Stack Overflow, but I cannot see it.

Here is the code:

#include <mpi.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <cmath>

int main(int argc, char *argv[])
{
  int taskid, ntasks, mat_start, mat_end, i, j, k;
  double start_time; // hold start time
  double end_time;   // hold end time

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

  int size = 0;

  if (argc != 2) {
    printf("No arguments");
    exit(-1);
  }

  size = atoi(argv[1]);

  if (size < 0) {
    printf("SIZE: %2d \n", size);
    exit(-1);
  }

  int **mat_a = (int **)malloc(sizeof(int *) * size);
  int **mat_b = (int **)malloc(sizeof(int *) * size);
  int **mat_c = (int **)malloc(sizeof(int *) * size);

  for (int z = 0; z < size; z++) {
    mat_a[z] = (int *)malloc(sizeof(int) * size);
    mat_b[z] = (int *)malloc(sizeof(int) * size);
    mat_c[z] = (int *)malloc(sizeof(int) * size);
  }

  mat_start = taskid * size / ntasks;
  mat_end = (taskid + 1) * size / ntasks;

  if (taskid == 0) {
    for (i = 0; i < size; i++) {
      for (j = 0; j < size; j++) {
        mat_a[i][j] = (int)(sin(i) * i * j) % 10;
      }
    }
    for (i = 0; i < size; i++) {
      for (j = 0; j < size; j++) {
        mat_b[i][j] = (int)(cos(j) * (i + j)) % 10;
      }
    }
  }

  start_time = MPI_Wtime();

  MPI_Bcast(&mat_b, size*size, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Scatter(&mat_a, size*size/ntasks, MPI_INT, mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

  printf("computing slice %d (from row %d to %d)\n", taskid, mat_start, mat_end - 1);
  for (i = mat_start; i < mat_end; i++)
    for (j = 0; j < size; j++) {
      mat_c[i][j] = 0;
      for (k = 0; k < size; k++)
        mat_c[i][j] += mat_a[i][k] * mat_b[k][j];
    }

  MPI_Gather(mat_c[mat_start], size*size/ntasks, MPI_INT, mat_c, size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

  end_time = MPI_Wtime();
  printf("\nRunning Time = %f\n\n", end_time - start_time);

  MPI_Finalize();
  return 0;
}

Can anyone tell me what is wrong?


EDIT:

Thanks for the answer. I tried to implement your proposed solution, but with no good results. I changed part of the code to look like this:

int **mat_a = (int **)malloc(size * sizeof(int *));
int **mat_b = (int **)malloc(size * sizeof(int *));
int **mat_c = (int **)malloc(size * sizeof(int *));

if (mat_a == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }
if (mat_b == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }
if (mat_c == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }

mat_a[0] = (int *)malloc(size * size * sizeof(int));
mat_b[0] = (int *)malloc(size * size * sizeof(int));
mat_c[0] = (int *)malloc(size * size * sizeof(int));

if (mat_a[0] == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }
if (mat_b[0] == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }
if (mat_c[0] == NULL) { fprintf(stderr, "malloc failed\n"); exit(1); }

int ti;
for (ti = 1; ti < size; ti++) {
    mat_a[ti] = &mat_a[0][size * ti];
    mat_b[ti] = &mat_a[0][size * ti];
    mat_c[ti] = &mat_a[0][size * ti];
}

mat_start = taskid * size / ntasks;
mat_end = (taskid + 1) * size / ntasks;

// populating the array ......

start_time = MPI_Wtime();

MPI_Bcast(mat_a[0], size*size, MPI_INT, 0, MPI_COMM_WORLD);
//MPI_Bcast(&mat_b, size*size, MPI_INT, 0, MPI_COMM_WORLD);
//MPI_Scatter(&mat_b, size*size/ntasks, MPI_INT, mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Scatter(mat_b[0], size*size/ntasks, MPI_INT, mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

printf("computing slice %d (from row %d to %d)\n", taskid, mat_start, mat_end - 1);
for (i = mat_start; i < mat_end; i++)
    for (j = 0; j < size; j++) {
        mat_c[i][j] = 0;
        for (k = 0; k < size; k++)
            mat_c[i][j] += mat_a[i][k] * mat_b[k][j];
    }

MPI_Gather(mat_c[mat_start], size*size/ntasks, MPI_INT, mat_c, size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

end_time = MPI_Wtime();
printf("\nRunning Time = %f\n\n", end_time - start_time);

MPI_Finalize();
return 0;
}

and the program starts to run and even prints mat_a (when I added printing), but after some delay I get this:

[cuda:05167] *** Process received signal ***
[cuda:05167] Signal: Segmentation fault (11)
[cuda:05167] Signal code:  (128)
[cuda:05167] Failing at address: (nil)

Could Scatter and Gather be the problem? What do I need to change to finally make it work?

Rag
  • 65
  • 9
  • 1
    It may not solve the problem, but take a look at `mat_b[ti]=&mat_a[0][size*ti]; mat_c[ti]=&mat_a[0][size*ti];`! – francis Jun 03 '16 at 07:18
  • 1
    Try also `MPI_Gather (mat_c[mat_start], size*size/ntasks, MPI_INT, mat_c[0], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);`. Using `mat_c` instead of `mat_c[0]` can explain the segmentation fault. Notice that `mat_b` is scattered to `mat_a`! – francis Jun 03 '16 at 07:30
  • Thank you @francis. What was missing was mat_c[0]. Now everything works just fine. – Rag Jun 03 '16 at 18:15

2 Answers

2

The problem is that you've built your 2D array as an array of separately allocated rows in C, but that's not what MPI is expecting!

MPI has no way of knowing that you have given it a 2D array! MPI expects a contiguous buffer (of ints, in your case).

To solve your problem, you need to allocate a pseudo-multidimensional array: one contiguous block of data with an array of row pointers indexing into it. This guarantees that your memory is contiguous, and after this you won't have the segmentation fault.
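For illustration, a minimal sketch of such an allocation (block and mat are placeholder names): all the data lives in a single malloc'd block, and the row pointers merely index into it, so mat[i][j] indexing still works while &mat[0][0] is a valid contiguous buffer for MPI.

int *block = (int *)malloc(size * size * sizeof(int)); /* one contiguous block of size*size ints */
int **mat  = (int **)malloc(size * sizeof(int *));     /* row pointers into that block */
for (int r = 0; r < size; r++)
    mat[r] = &block[r * size];                         /* row r starts at offset r*size */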

Snowman
  • 1,503
  • 1
  • 17
  • 39
2

The issue with the way the memory is allocated is that the 2D arrays are not contiguous in memory: malloc() is called once for each row. See sending blocks of 2D array in C using MPI

To change that, use the following procedure:

int n=42;
int** mat_a=malloc(n*sizeof(int*));
if(mat_a==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
mat_a[0]=malloc(n*n*sizeof(int));
if(mat_a[0]==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
int i;
for(i=1;i<n;i++){
   mat_a[i]=&mat_a[0][n*i];
}
...
free(mat_a[0]);
free(mat_a);

Second, pointer values are only meaningful within a given process. Hence, sending a pointer from one process to another by doing MPI_Bcast(&mat_b, ...) is erroneous: it can trigger a segmentation fault if mat_b is dereferenced after the message. The buffer can be sent instead:

MPI_Bcast(mat_a[0],n*n, MPI_INT,0,MPI_COMM_WORLD);
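
The same reasoning applies to Scatter and Gather: pass the contiguous data blocks, not the pointer arrays. Assuming the contiguous allocation above and that size is divisible by ntasks, a sketch of corrected calls could look like this (strictly speaking, the root rank should pass MPI_IN_PLACE so that its send and receive buffers do not overlap):

/* each rank receives its slice of rows into its own copy of mat_a */
MPI_Scatter(mat_a[0], size*size/ntasks, MPI_INT,
            mat_a[mat_start], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);

/* the computed rows of mat_c are collected into the contiguous block on root */
MPI_Gather(mat_c[mat_start], size*size/ntasks, MPI_INT,
           mat_c[0], size*size/ntasks, MPI_INT, 0, MPI_COMM_WORLD);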

Minimal code, to be compiled with mpicc main.c -o main -Wall and run with mpirun -np 2 main:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>


int main(int argc,char *argv[])
{

    int  size, rank;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);    


    int n=42;
    int** mat_a=malloc(n*sizeof(int*));
    if(mat_a==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
    mat_a[0]=malloc(n*n*sizeof(int));
    if(mat_a[0]==NULL){fprintf(stderr,"malloc failed\n");exit(1);}
    int i;
    for(i=1;i<n;i++){
        mat_a[i]=&mat_a[0][n*i];
    }


    //populating the array
    int j;
    if(rank==0){
        for(i=0;i<n;i++){
            for(j=0;j<n;j++){
                mat_a[i][j]=i+j;
            }
        }
    }

    // Bcast the array
    MPI_Bcast(mat_a[0],n*n, MPI_INT,0,MPI_COMM_WORLD);

    if(rank==1){
        for(i=0;i<n;i++){
            for(j=0;j<n;j++){
                printf("%d ",mat_a[i][j] );
            }
            printf("\n");
        }
    }

    free(mat_a[0]);
    free(mat_a);

    MPI_Finalize();
    return 0;
}
francis
  • 9,525
  • 2
  • 25
  • 41