I'm new to programming in general, and especially new to MPI. I'm trying to scatter multiple adjacency matrices from the root process to the other processes, perform some operations on those matrices, and then gather the data back. Instead, all of the data ends up on every process and the output adjacency matrices aren't correct, so I'm assuming I'm using MPI_Scatterv and/or MPI_Gatherv incorrectly. I'm also not sure whether I should scatter the matrices element by element or whether there is a way to scatter an entire matrix at once. If you could take a look at my code, any help would be much appreciated. Thanks!
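From what I've read so far, my guess is that I don't have to scatter element by element: it seems like one whole N x N matrix can be described as a single MPI datatype and then whole matrices can be scattered, roughly like this (just a sketch of what I think it would look like, assuming the matrices sit in one contiguous buffer; localmats is a placeholder name for a per-rank receive buffer):

MPI_Datatype mat;
MPI_Type_contiguous(N*N, MPI_INT, &mat);   /* one N x N adjacency matrix */
MPI_Type_commit(&mat);
/* each rank receives sendcounts[rank] whole matrices */
MPI_Scatterv(a, sendcounts, displs, mat,
             localmats, sendcounts[rank], mat,
             root, MPI_COMM_WORLD);
MPI_Type_free(&mat);

I'm not sure whether that's right, though. Here is the code I actually have: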
int rank, size;
MPI_Status status;
MPI_Datatype strip;
bool passflag[Nmats];
MPI::Init();
rank = MPI::COMM_WORLD.Get_rank();
size = MPI::COMM_WORLD.Get_size();
int sendcounts[size], recvcounts, displs[size], rcounts[size];
if(rank == root){
    fin.open(infname);
    fout.open(outfname);
    /* INPUT ADJ-MATS */
    for(i = 0; i < Nmats; i++){
        fin >> dummy;
        for(j = 0; j < N; j++){
            for(k = 0; k < N; k++){
                fin >> a[i][j][k];
            }
        }
    }
}
/* Nmats = Number of matrices; N = nodes; Nmats isn't divisible by the number of processors */
Nmin = Nmats/size;
Nextra = Nmats%size;
k = 0;
for(i = 0; i < size; i++){
    if(i < Nextra) sendcounts[i] = Nmin + 1;
    else sendcounts[i] = Nmin;
    displs[i] = k;
    k = k + sendcounts[i];
}
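/* Just to check my understanding: with, say, Nmats = 10 and size = 4 this gives
   Nmin = 2, Nextra = 2, sendcounts = {3, 3, 2, 2} and displs = {0, 3, 6, 8},
   so the first two ranks each get one extra matrix. */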
recvcounts = sendcounts[rank];
MPI_Type_vector(Nmin, N, N, MPI_FLOAT, &strip);
MPI_Type_commit(&strip);
MPI_Scatterv(a, sendcounts, displs, strip, a, N*N, strip, 0, MPI_COMM_WORLD);
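/* This is the call I'm least sure about. My understanding is that the receive
   count should match what this rank is actually sent (recvcounts, not N*N), that
   the datatype should be based on MPI_INT because a holds ints, and that the send
   and receive buffers must not be the same pointer (the root can pass MPI_IN_PLACE
   on the receive side instead). So I suspect it should look more like this, with
   "mat" describing one whole N x N matrix as in the sketch above (just my guess):

       if(rank == root)
           MPI_Scatterv(a, sendcounts, displs, mat,
                        MPI_IN_PLACE, recvcounts, mat, root, MPI_COMM_WORLD);
       else
           MPI_Scatterv(NULL, NULL, NULL, mat,
                        a, recvcounts, mat, root, MPI_COMM_WORLD);
*/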
/* Perform operations on adj-mats */
k = 0;
for(i = 0; i < size; i++){
    if(i < Nextra) rcounts[i] = Nmin + 1;
    else rcounts[i] = Nmin;
    displs[i] = k;
    k = k + rcounts[i];
}
MPI_Gatherv(&passflag, 1, MPI::BOOL, &passflag, rcounts , displs, MPI::BOOL, 0, MPI_COMM_WORLD);
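/* Similarly here, I believe the send count needs to be the number of flags this
   rank actually owns (rcounts[rank]) rather than 1, and the root probably needs
   MPI_IN_PLACE again so the send and receive buffers don't overlap. My guess:

       if(rank == root)
           MPI_Gatherv(MPI_IN_PLACE, rcounts[rank], MPI::BOOL,
                       passflag, rcounts, displs, MPI::BOOL, root, MPI_COMM_WORLD);
       else
           MPI_Gatherv(passflag, rcounts[rank], MPI::BOOL,
                       NULL, NULL, NULL, MPI::BOOL, root, MPI_COMM_WORLD);

   assuming each rank puts its results in the first rcounts[rank] entries of its
   own passflag. */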
MPI::Finalize();
//OUTPUT ADJ_MATS
for(i = 0; i < Nmats; i++) if(passflag[i]){
    for(j = 0; j < N; j++){
        for(k = 0; k < N; k++){
            fout << a[i][j][k] << " ";
        }
        fout << endl;
    }
    fout << endl;
}
fout << endl;
Hi, I was able to get the code working with static allocation, but it more or less broke when I switched to dynamic allocation. I'm not sure whether I need to allocate the memory outside of MPI (before MPI::Init) or whether it's something I should do after I initialize MPI. Any suggestions would be much appreciated!
//int a[Nmats][N][N];
/* Prior to adding this part of the code it ran fine, now it's no longer working */
int ***a = new int**[Nmats];
for(i = 0; i < Nmats; ++i){
    a[i] = new int*[N];
    for(j = 0; j < N; ++j){
        a[i][j] = new int[N];
        for(k = 0; k < N; k++){
            a[i][j][k] = 0;
        }
    }
}
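/* From what I've been reading, I suspect the real problem is here: this allocates
   Nmats*N separate little arrays with new, so the data is no longer one contiguous
   block the way the static int a[Nmats][N][N] was, and the &a[...][0][0] arguments
   I pass to MPI below assume contiguous storage. My guess is I need to allocate one
   big block and build the index tables on top of it, roughly like this (adata is
   just a name I made up), but I'm not sure:

       int *adata = new int[Nmats * N * N]();   // one contiguous, zero-initialized block
       int ***a = new int**[Nmats];             // pointer tables so a[i][j][k] still works
       for(i = 0; i < Nmats; ++i){
           a[i] = new int*[N];
           for(j = 0; j < N; ++j){
               a[i][j] = adata + (i*N + j)*N;   // row j of matrix i inside the big block
           }
       }
*/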
int rank, size;
MPI_Status status;
MPI_Datatype plane;
bool passflag[Nmats];
MPI::Init();
rank = MPI::COMM_WORLD.Get_rank();
size = MPI::COMM_WORLD.Get_size();
MPI_Type_contiguous(N*N, MPI_INT, &plane);
MPI_Type_commit(&plane);
int counts[size], recvcounts, displs[size+1];
if(rank == root){
    fin.open(infname);
    fout.open(outfname);
    /* INPUT ADJ-MATS */
    for(i = 0; i < Nmats; i++){
        fin >> dummy;
        for(j = 0; j < N; j++){
            for(k = 0; k < N; k++){
                fin >> a[i][j][k];
            }
        }
    }
}
Nmin = Nmats/size;
Nextra = Nmats%size;
k = 0;
for(i = 0; i < size; i++){
    if(i < Nextra) counts[i] = Nmin + 1;
    else counts[i] = Nmin;
    displs[i] = k;
    k = k + counts[i];
}
recvcounts = counts[rank];
displs[size] = Nmats;
MPI_Scatterv(&a[displs[rank]][0][0], counts, displs, plane, &a[displs[rank]][0][0], recvcounts, plane, 0, MPI_COMM_WORLD);
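/* I suspect this is wrong on two counts: the send and receive pointers overlap on
   the root (my understanding is the root should pass MPI_IN_PLACE on the receive
   side instead), and with the new[] allocation above the matrices aren't contiguous,
   so &a[displs[rank]][0][0] doesn't actually cover counts[rank] whole matrices.
   With the contiguous adata block from the sketch above, my guess is it would be:

       if(rank == root)
           MPI_Scatterv(adata, counts, displs, plane,
                        MPI_IN_PLACE, recvcounts, plane, root, MPI_COMM_WORLD);
       else
           MPI_Scatterv(NULL, NULL, NULL, plane,
                        adata + displs[rank]*N*N, recvcounts, plane, root, MPI_COMM_WORLD);
*/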
/* Perform operations on matrices */
MPI_Gatherv(&passflag[displs[rank]], counts, MPI::BOOL, &passflag[displs[rank]], &counts[rank], displs, MPI::BOOL, 0, MPI_COMM_WORLD);
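/* I also think the argument order here is off: the send count should be a single
   number (counts[rank]) and the receive counts should be the whole counts array,
   with MPI_IN_PLACE on the root again so the buffers don't overlap. What I think
   it should be:

       if(rank == root)
           MPI_Gatherv(MPI_IN_PLACE, counts[rank], MPI::BOOL,
                       passflag, counts, displs, MPI::BOOL, root, MPI_COMM_WORLD);
       else
           MPI_Gatherv(&passflag[displs[rank]], counts[rank], MPI::BOOL,
                       NULL, NULL, NULL, MPI::BOOL, root, MPI_COMM_WORLD);

   assuming each rank writes its flags into passflag[displs[rank]] through
   passflag[displs[rank] + counts[rank] - 1]. */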
MPI_Type_free(&plane);
MPI::Finalize();