
First, I apologize for my poor English. I wrote serial and parallel versions of a code to solve the Laplace equation in 2D. The number of nodes is read from the user (my code takes the number of nodes, not the size of the rectangle), and a 2-dimensional matrix is defined. In the serial code I can use a large number of nodes (over 1000), but in the parallel code I can only use up to 80 nodes; with a larger number, I get the error below. At first I defined the matrices as ordinary arrays; when I instead use malloc to define and allocate memory for the 2-dimensional arrays, I get the same error for any number of nodes (even fewer than 80).

[handicraft-ThinkPad:03040] *** Process received signal ***
[handicraft-ThinkPad:03040] Signal: Segmentation fault (11)
[handicraft-ThinkPad:03040] Signal code: Address not mapped (1)
[handicraft-ThinkPad:03040] Failing at address: 0x1f4
[handicraft-ThinkPad:03040] [ 0] /lib/x86_64-linux-gnu/libpthread.so.0(+0x11390)[0x7fd1a1728390]
[handicraft-ThinkPad:03040] [ 1] l[0x4010e0]
[handicraft-ThinkPad:03040] [ 2] /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf0)[0x7fd1a136e830]
[handicraft-ThinkPad:03040] [ 3] l[0x400b29]
[handicraft-ThinkPad:03040] *** End of error message ***
--------------------------------------------------------------------------
mpirun noticed that process rank 0 with PID 3040 on node handicraft-ThinkPad exited on signal 11 (Segmentation fault).

Where is the problem? How can I use a large number of nodes in the parallel code, just like in the serial code? My code is here:

#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

float **floatalloc2d(int n, int m);

int main()
{
    int rank, size;
    double start_t, end_t;
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    float k, b_left, b_right, b_up, b_down;
    int l_type, u_type, r_type, d_type, i, j, n, flag;

    // read data from the user
    if (rank == 0)
    {
        printf("Enter number of nodes: \n");
        scanf("%d", &n);
        printf("Enter the k factor: \n");
        scanf("%f", &k);
        printf("Enter type of left boundary condition: 0 for Dirichlet and 1 for Neumann \n");
        scanf("%d", &l_type);
        printf("Enter left boundary condition:\n");
        scanf("%f", &b_left);
    }

    // measure the time
    start_t = MPI_Wtime();
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&k, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);

    int cond = 0, dx = 1, dy = 1, step = n / size, snd_num = step * n, rcv_num = step * n;

    //float t1[n][n], t2[n][n], t3[step][n], t4[step][n];
    float error;
    float **t1 = floatalloc2d(n, n);
    float **t2 = floatalloc2d(n, n);
    float **t3 = floatalloc2d(step, n);
    float **t4 = floatalloc2d(step, n);

    // compute with Gauss-Seidel
    for (int z = 0; z < 1000; z++)
    {
        // send data to all processes
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Scatter(t1, snd_num, MPI_FLOAT, t3, rcv_num, MPI_FLOAT, 0, MPI_COMM_WORLD);
        // compute in each process
        for (i = 1; i < (step - 1); i++)
        {
            for (j = 1; j < (n - 1); j++)
            {
                t4[i][j] = 0.25 * (t3[i-1][j] + t3[i+1][j] + t3[i][j-1] + t3[i][j+1]);
                error = fabs(t4[i][j] - t3[i][j]);
                t3[i][j] = t4[i][j];
                //cout<<i<<","<<j<<":  ";
                //cout<<"error= "<<error<<"\n";
            }
        }
        // collect data from all processes
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Gather(&t3, snd_num, MPI_FLOAT, &t1, rcv_num, MPI_FLOAT, 0, MPI_COMM_WORLD);
        // review
    }
    end_t = MPI_Wtime();

    MPI_Finalize();
}

float **floatalloc2d(int n, int m)
{
    float *data = (float *)malloc(n * m * sizeof(float));
    float **array = (float **)malloc(n * sizeof(float *));
    for (int i = 0; i < n; i++)
        array[i] = &(data[i * m]);

    return array;
}

Thank you very much for your answer

sajad.k

1 Answer


You are trying to scatter the elements of a matrix, but t1 is just an array of pointers, so that doesn't match. You should use a contiguous data structure for your matrices. An example of how this can be done is given by this answer:

float **floatalloc2d(int n, int m) {
    float *data = (float *)malloc(n * m * sizeof(float));
    float **array = (float **)malloc(n * sizeof(float *));
    for (int i = 0; i < n; i++)
        array[i] = &(data[i * m]);

    return array;
}

void floatfree2d(float **array) {
    free(array[0]);
    free(array);
}

float** t1 = floatalloc2d(n, n);

...

MPI_Scatter(t1[0],snd_num,MPI_FLOAT,t3[0],rcv_num,MPI_FLOAT,0,MPI_COMM_WORLD);
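
The MPI_Gather call in your loop has the same problem: &t3 and &t1 are the addresses of the pointer tables, not of the matrix data, so they must become t3[0] and t1[0] as well. To make this concrete, here is a minimal self-contained sketch of the scatter/gather round trip with the contiguous allocator. The grid size n = 8 and the test fill values are just assumptions for illustration, and n is assumed to be divisible by the number of processes:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

float **floatalloc2d(int n, int m) {
    float *data = (float *)malloc(n * m * sizeof(float));
    float **array = (float **)malloc(n * sizeof(float *));
    for (int i = 0; i < n; i++)
        array[i] = &(data[i * m]);
    return array;
}

void floatfree2d(float **array) {
    free(array[0]);
    free(array);
}

int main(void) {
    int rank, size;
    int n = 8;                              /* hypothetical grid size, assumed divisible by 'size' */
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int step = n / size;                    /* rows per process */
    float **t1 = floatalloc2d(n, n);        /* full grid; only the root's copy matters */
    float **t3 = floatalloc2d(step, n);     /* local block of rows */

    if (rank == 0)                          /* fill the grid with arbitrary test data */
        for (int i = 0; i < n * n; i++)
            t1[0][i] = (float)i;

    /* pass t1[0] and t3[0] (pointers to the contiguous data),
       never t1 or &t3 (the pointer tables) */
    MPI_Scatter(t1[0], step * n, MPI_FLOAT, t3[0], step * n, MPI_FLOAT, 0, MPI_COMM_WORLD);

    /* ... local computation on t3 would go here ... */

    MPI_Gather(t3[0], step * n, MPI_FLOAT, t1[0], step * n, MPI_FLOAT, 0, MPI_COMM_WORLD);

    floatfree2d(t3);
    floatfree2d(t1);
    MPI_Finalize();
    return 0;
}

If n is not divisible by the number of processes, the trailing rows are silently dropped; handling that case is a separate issue (e.g. with MPI_Scatterv/MPI_Gatherv).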
Zulan
  • I applied your guide to the code like this: `float *data = (float *)malloc(n * n * sizeof(float)); float **t1 = (float **)malloc(n * sizeof(float *)); for (int i=0; i` – sajad.k May 11 '17 at 10:30
  • Use `floatalloc2d` / `floatfree2d` exclusively, and if the issue persists add the updated code to the question - I cannot possibly help you debug based on a few lines dropped in a comment. – Zulan May 11 '17 at 12:15
  • I updated the code. I'm new on Stack Overflow, sorry about that. – sajad.k May 13 '17 at 08:57
  • You must use `t1[0]` or `&t1[0][0]` as buffer argument to the MPI functions. – Zulan May 13 '17 at 09:21
  • I did that, but nothing changed. – sajad.k May 14 '17 at 07:43