0

I have a matrix which I split into parts. Each part is worked upon by a processor, Finally i want each processor to have the full matrix. For e.g. A[100][4], split into 2 processor X works on rows 0 to 49, processor Y works on rows 50 to 99. Finally i want both processors to have A[100][4] with new values. Here is my code.

#include"mpi.h"
#include<iostream>
#include<cmath>

using namespace std;

#define MASTER  0
#define BEGIN   1


const int COLS=100;

int main(int argc, char *argv[])
{
     double t1=MPI_Wtime(),trans_grad[COLS][8];
    int taskid,
    numtasks,
    numworkers,columns,
    column_mean,
    offset,
    tag=BEGIN,
    mtag,
    source,
    ext,left,
    size,
    msgtype,dest;
    MPI_Status status;

MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
numworkers=numtasks-1;

int columns_send[numworkers+1],sum_s;
column_mean=COLS/numworkers;
ext=COLS%numworkers;
offset=0;

if (taskid==MASTER)
{
    for (int i=1;i<=numworkers;i++)
    {

        dest=i;
        columns=(i<=ext) ? column_mean+1:column_mean;

        MPI_Send(&offset, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);        
        MPI_Send(&columns, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);

        offset=offset+columns;            

    }
    MPI_Finalize();

}

if (taskid!=MASTER)
{
    for (int i=0;i<COLS;i++)
    {
        for (int j=0;j<8;j++)
        {
            trans_grad[i][j]=0.0;
        }
    }

    source=MASTER;
    msgtype=BEGIN;

    MPI_Recv(&offset, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&columns, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);

    columns_send[taskid]=columns;

    for (int i=offset;i<offset+columns;i++)
    {
        for (int j=0;j<8;j++)
        {
            trans_grad[i][j]=taskid+1.5;
        }
    }


    for (int j=1;j<=numworkers;j++)
    {
       if (j!=taskid)
        {
            mtag=taskid;
            source=j;

            MPI_Send(&columns_send[taskid], 1, MPI_INT, j, mtag, MPI_COMM_WORLD);                
            MPI_Recv(&columns_send[j], 1, MPI_INT, source, source, MPI_COMM_WORLD,&status);              
        }
    }

    sum_s=0;
    for (int yy=taskid-1;yy>=1;yy--)
    {
        sum_s=sum_s-columns_send[yy];
    }

    for (int j=1;j<=numworkers;j++)
    {
       if (j!=taskid)
        {
            mtag=taskid;
            source=j;
            MPI_Send(&trans_grad[offset][0], columns_send[taskid]*8, MPI_DOUBLE, j, mtag, MPI_COMM_WORLD);
            MPI_Recv(&trans_grad[offset+sum_s][0], columns_send[j]*8, MPI_DOUBLE, source, source, MPI_COMM_WORLD,&status);              
        }
        sum_s=sum_s+columns_send[j];                
    }
    MPI_Finalize();
}

} The program works fine with COLS=100 and 150 but it just gets stuck for COLS =500, 1000. I have no idea why this is happening.

shekhar
  • 23
  • 4
  • 1
    please edit your post to include a program that compiles. also, if some stuff is not needed to evidence the hang, please trim it. – Gilles Gouaillardet Oct 17 '17 at 07:49
  • Duplicate https://stackoverflow.com/questions/15833947/mpi-hangs-on-mpi-send-for-large-messages/15837635#15837635 (there are probably many others). – Zulan Oct 17 '17 at 08:46
  • @Zulan Thank you. Using Sendrecv solved the issue. – shekhar Oct 22 '17 at 09:13

0 Answers0