1

I'm trying to gather some rows of different matrices from nodes in an MPI configuration. So far I've got the program to send one row from one process to another with the code I have below, i.e. the code changes the matrix recv to the numbers 1..7, but ideally what I'd like it to do is fill the first two rows: numbers 1..7 in the first row and 8..14 in the second. This doesn't happen when I change the send/receive count on lines 55/57. The blocks should be laid out contiguously in memory, so I'm not sure where I'm going wrong; any help would be appreciated.

Code:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
/* Allocate a dim x dim matrix as an array of row pointers.
 * NOTE(review): each row comes from its own malloc() (dim+1 calls in
 * total), so the rows are NOT guaranteed to be contiguous in memory —
 * this matters when the matrix is handed to MPI_Gatherv as a flat
 * buffer (see the question/answer below).
 * Allocation results are not checked; malloc() failure would crash. */
double **allocMatrix(int dim) {
    int i;
    double **matrix;
    matrix = (double **)malloc(dim*sizeof(double *));
    for(i=0; i < dim; i++) {
        matrix[i] = (double *)malloc(dim*sizeof(double));
    }
    return matrix;
}
/* Print a size x size matrix to stdout, one row per line,
 * each value in a 10-character field. */
void printMatrix(double **values, int size) {
    int i, j;

    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            printf("%10lf ", values[i][j]);
        }
        printf("\n");
    }
}
int main(int argc, char* argv[]) {
    MPI_Init(&argc, &argv);
    int size, rank, i, j;
    int dimensions = 7;
    MPI_Comm_size(MPI_COMM_WORLD, &size);//number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);//rank for each process

    // recv is populated only on rank 0; send only on the other ranks.
    double **send = allocMatrix(dimensions);
    double **recv = allocMatrix(dimensions);
    int count = 0;
    for (i=0; i<dimensions; i++) {
        for (j=0; j<dimensions; j++) {
            if (rank == 0) {
                recv[i][j] = 0;
            } else {
                send[i][j] = ++count;
            }
        }
    }

    // One "item" of arrType = one row of 'dimensions' doubles.
    MPI_Datatype arrType;
    MPI_Type_vector(1, dimensions, 0, MPI_DOUBLE, &arrType);
    MPI_Type_commit(&arrType);
    int recvCounts[size];
    int displs[size];
    // NOTE(review): hard-coded for exactly 2 ranks — recvCounts[i] and
    // displs[i] for i >= 2 are left uninitialized.
    recvCounts[0] = 0;
    displs[0] = 0;
    recvCounts[1] = 1;
    displs[1] = 0;
    // NOTE(review): the send count is fixed at 1 row and does not track
    // recvCounts.  Also, allocMatrix() mallocs each row separately, so
    // writing more than one row's worth of gathered data at &recv[0][0]
    // would run past the first row's buffer — presumably the cause of
    // the failure described above; see the accepted answer.
    MPI_Gatherv(&(send[0][0]), 1, arrType,
         &(recv[0][0]), recvCounts, displs, arrType,
         0, MPI_COMM_WORLD);

    if (rank == 0) {
        printMatrix(recv, dimensions);
    }


    MPI_Finalize();
    return 0;
}

Output:

make gatherv
mpicc -Wall -o gatherv gatherv.c && ./gatherv
  1.000000   2.000000   3.000000   4.000000   5.000000   6.000000   7.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 

Desired output:

  1.000000   2.000000   3.000000   4.000000   5.000000   6.000000   7.000000 
  8.000000   9.000000   10.00000   11.00000   12.00000   13.00000   14.00000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
  0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000 
micr0sub
  • 13
  • 4
  • I think you might have a logic error in your `MPI_Gatherv`, all your processes are sending the same portion of the array, which is initialized in exactly the same way on non-zero rank processes. – struct Dec 19 '14 at 11:15
  • @RichardTownsend Sorry, forgot to add I'm only running this on two processes at the moment - I know how to extend it further but I wanted to figure out how to send multiple rows first before I continue on. – micr0sub Dec 19 '14 at 11:23

1 Answer

1

There are two points that can be modified in this code :

  • The allocation of the matrix works well and it may sometimes be useful, but rows are not contiguous in memory since malloc() is called dim+1 times. You may ensure that rows are contiguous in memory by allocating space for all values at once like in this answer and many others. This kind of 2D array is the one that could be used by libraries like lapack and fftw...and MPI_Gatherv() will like it as well. malloc() will be called twice : once for pointers to rows and once for values.

  • If you change values in recvCounts[], the number of items to send must change accordingly. Since all processes populate recvCounts[] in the same way, and since receive type and send type are the same (arrType), using recvCounts[rank] as the second argument of MPI_Gatherv() is a good trick.

By the way, it is a really nice question for a first one!

Here goes the code, which can be compiled by mpicc main.c -o main and run by mpirun -np 42 main

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
/*
 * Allocate a dim x dim matrix whose values live in ONE contiguous
 * buffer — required so MPI_Gatherv can treat &matrix[0][0] as a flat
 * array of rows.
 *
 * Layout: matrix[0] points at a dim*dim block of doubles and each
 * matrix[i] aliases the start of row i inside that block, so only two
 * malloc() calls are made (row pointers + values).
 *
 * Returns NULL if either allocation fails (nothing is leaked).
 * The caller releases the matrix with freeMatrix().
 */
double **allocMatrix(int dim) {
    double **matrix = malloc(dim * sizeof *matrix);
    if (matrix == NULL) {
        return NULL;
    }
    /* One allocation for all values => rows are contiguous. */
    matrix[0] = malloc((size_t)dim * dim * sizeof **matrix);
    if (matrix[0] == NULL) {
        free(matrix);
        return NULL;
    }
    for (int i = 1; i < dim; i++) {
        matrix[i] = &matrix[0][dim * i];
    }
    return matrix;
}
/* Write a size x size matrix to stdout, one row per line, every
 * value formatted into a 10-character field. */
void printMatrix(double **values, int size) {
    int row, col;

    for (row = 0; row < size; row++) {
        for (col = 0; col < size; col++)
            printf("%10lf ", values[row][col]);
        printf("\n");
    }
}
/* Release a matrix created by allocMatrix(): one free() for the
 * contiguous value block, one for the row-pointer array.
 * Safe to call with NULL (a no-op, mirroring free()) — the original
 * would dereference values[0] and crash on a NULL matrix. */
void freeMatrix(double **values) {
    if (values == NULL) {
        return;
    }
    free(values[0]);
    free(values);
}

int main(int argc, char* argv[]) {
    MPI_Init(&argc, &argv);
    int size, rank, i, j;
    int dimensions = 7;
    MPI_Comm_size(MPI_COMM_WORLD, &size);//number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);//rank for each process

    /* recv is filled only on rank 0; send only on the other ranks. */
    double **send = allocMatrix(dimensions);
    double **recv = allocMatrix(dimensions);
    if (send == NULL || recv == NULL) {
        fprintf(stderr, "matrix allocation failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    int count = 0;
    for (i=0; i<dimensions; i++) {
        for (j=0; j<dimensions; j++) {
            if (rank == 0) {
                recv[i][j] = 0;
            } else {
                send[i][j] = ++count;
            }
        }
    }

    /* One item of arrType = one matrix row of 'dimensions' doubles. */
    MPI_Datatype arrType;
    MPI_Type_vector(1, dimensions, 0, MPI_DOUBLE, &arrType);
    MPI_Type_commit(&arrType);

    /* recvCounts[r] = number of rows gathered from rank r,
     * displs[r]    = row offset inside recv where they land. */
    int recvCounts[size];
    int displs[size];
    for (i = 0; i < size; i++) {
        recvCounts[i] = 0;
        displs[i] = 0;
    }
    if (size > 1) {
        recvCounts[1] = 2;// two rows sent by rank 1
        displs[1] = 0;// ...to the start of recv
    }

    /* Each rank sends exactly as many rows as rank 0 expects from it,
     * hence recvCounts[rank] as the send count. */
    MPI_Gatherv(&(send[0][0]), recvCounts[rank], arrType,
            &(recv[0][0]), recvCounts, displs, arrType,
            0, MPI_COMM_WORLD);

    if (rank == 0) {
        printMatrix(recv, dimensions);
    }

    /* Release the committed datatype (the original leaked it) and
     * both matrices before shutting MPI down. */
    MPI_Type_free(&arrType);
    freeMatrix(recv);
    freeMatrix(send);

    MPI_Finalize();
    return 0;
}
Community
  • 1
  • 1
francis
  • 9,525
  • 2
  • 25
  • 41
  • This is perfect, thanks! Now just to find out how to translate this from a n x n -> n x n to n x n -> m x n.... – micr0sub Dec 21 '14 at 02:47