I want to extend this example by Jonathan Dursi to unequal 2D arrays, using MPI_Scatterv and MPI_Gatherv. Basically, I have 4 processes, each holding a local array of a different size:
Process = 0
|00000|
|00000|
|00000|
Process = 1
|1111|
|1111|
|1111|
Process = 2
|22222|
|22222|
Process = 3
|33333|
|33333|
I gather them on the master process, expecting to get:
Master = 0
|000001111|
|000001111|
|000001111|
|222223333|
|222223333|
However, below is what my code actually produces:
Master process
|000001111|
|100000111|
|110000011|
|222223333|
|322222333|
I think something is wrong with my MPI_Type_vector. Any suggestions on how to fix this?
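For reference, if I've worked out the offsets right, the blocks should start at these positions (in units of ints from the start of glob_data): rank 0 at 0, rank 1 at 5, rank 2 at 3*9 = 27, and rank 3 at 27 + 5 = 32, which is what the displs array in the code below comes out to.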
Below is my code:
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char **argv)
{
    int rank, size;               // rank of current process and no. of processes
    int domain_x, domain_y;
    int global_x, global_y;
    int topx = 2, topy = 2;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Local block sizes for each rank
    if (rank == 0)
    {
        domain_x = 5;
        domain_y = 3;
    }
    if (rank == 1)
    {
        domain_x = 4;
        domain_y = 3;
    }
    if (rank == 2)
    {
        domain_x = 5;
        domain_y = 2;
    }
    if (rank == 3)
    {
        domain_x = 4;
        domain_y = 2;
    }

    global_x = 9;
    global_y = 5;

    int *glob_data = new int[global_x*global_y];

    // Initialize local data: every entry holds the owning rank
    int *local_data = new int[domain_x*domain_y];
    for (int i = 0; i < domain_x; ++i)
    {
        for (int j = 0; j < domain_y; ++j)
        {
            local_data[j*domain_x+i] = rank;
        }
    }

    // Print each rank's local block in turn
    for (int p = 0; p < size; p++) {
        if (p == rank) {
            printf("rank = %d\n", rank);
            for (int j = 0; j < domain_y; j++) {
                for (int i = 0; i < domain_x; i++) {
                    printf("%3d ", (int)local_data[j*domain_x+i]);
                }
                printf("\n");
            }
            printf("\n");
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    // Receive-side type: domain_y rows of domain_x ints, with a stride of
    // topx*domain_x ints between row starts, resized so that displacements
    // to MPI_Gatherv are given in units of ints
    MPI_Datatype blocktype, blocktype2;
    MPI_Type_vector(domain_y, domain_x, topx*domain_x, MPI_INT, &blocktype2);
    MPI_Type_create_resized(blocktype2, 0, sizeof(int), &blocktype);
    MPI_Type_commit(&blocktype);

    // One block per rank; displacement (in ints) of each block in glob_data
    int *displs = new int[topx*topy];
    int *counts = new int[topx*topy];
    for (int j = 0; j < topy; j++) {
        for (int i = 0; i < topx; i++) {
            displs[j*topx+i] = j*global_x*domain_y + i*domain_x;
            counts[j*topx+i] = 1;
        }
    }

    MPI_Gatherv(local_data, domain_x*domain_y, MPI_INT,
                glob_data, counts, displs, blocktype,
                0, MPI_COMM_WORLD);

    // Print the assembled global array on the master process
    if (rank == 0)
    {
        printf("Master process = %d\n", rank);
        for (int j = 0; j < global_y; j++) {
            for (int i = 0; i < global_x; i++) {
                printf("%d ", glob_data[j*global_x+i]);
            }
            printf("\n");
        }
    }

    MPI_Type_free(&blocktype);
    MPI_Type_free(&blocktype2);

    MPI_Finalize();

    return 0;
}
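In case it helps clarify what I'm after, here is the kind of per-rank receive I've been considering as a fallback, replacing the single MPI_Gatherv call above with point-to-point messages. This is an untested sketch: it assumes the root knows every rank's block size and its starting offset in glob_data (hard-coded below the same way as in my code), and that the stride between row starts on the receive side should be global_x ints. I'd still prefer to keep a single collective call if that's possible with blocks of different widths.

if (rank == 0) {
    int nx[4]  = {5, 4, 5, 4};       // domain_x of each rank
    int ny[4]  = {3, 3, 2, 2};       // domain_y of each rank
    int off[4] = {0, 5, 27, 32};     // start of each block in glob_data, in ints

    for (int p = 0; p < size; p++) {
        // One receive type per rank: ny[p] rows of nx[p] ints,
        // with one full global row (global_x ints) between row starts
        MPI_Datatype recvblock;
        MPI_Type_vector(ny[p], nx[p], global_x, MPI_INT, &recvblock);
        MPI_Type_commit(&recvblock);

        if (p == 0) {
            // Copy the root's own block straight into the global array
            for (int j = 0; j < ny[0]; j++)
                for (int i = 0; i < nx[0]; i++)
                    glob_data[off[0] + j*global_x + i] = local_data[j*nx[0] + i];
        } else {
            MPI_Recv(glob_data + off[p], 1, recvblock, p, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        MPI_Type_free(&recvblock);
    }
} else {
    MPI_Send(local_data, domain_x*domain_y, MPI_INT, 0, 0, MPI_COMM_WORLD);
}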