I actually figured out how to do this, so this question should probably be deleted. But since I'm new to MPI, I'll post both solutions here; if anyone has suggestions for improvement, I'd be glad to hear them. Method one:
// Fox's algorithm, method 1: keep two buffers for b so the local multiply can
// overlap with the circular shift of b
double *b_buffers[2];
b_buffers[0] = (double *) malloc(n_local*n_local*sizeof(double));
b_buffers[1] = b;
for (stage = 0; stage < q; stage++) {
    // copy a into a_temp and broadcast a_temp from the selected process to the
    // other processes in its row
    for (i = 0; i < n_local*n_local; i++)
        a_temp[i] = a[i];
    if (stage == 0) {
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        multiplyMatrix(a_temp, b, c, n_local);
        // shift b up one row in place; MPI_Sendrecv_replace is used here because
        // posting a send and a receive on the same buffer is not allowed
        MPI_Sendrecv_replace(b, n_local*n_local, MPI_DOUBLE, nbrs[UP], 111,
                             nbrs[DOWN], 111, grid_comm, &status);
    }
    if (stage > 0) {
        // shift the b values of all processes while the local multiply runs
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        MPI_Isend(b_buffers[stage % 2],       n_local*n_local, MPI_DOUBLE, nbrs[UP],   111, grid_comm, &my_request1);
        MPI_Irecv(b_buffers[(stage + 1) % 2], n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
        multiplyMatrix(a_temp, b_buffers[stage % 2], c, n_local);
        MPI_Wait(&my_request2, &status);
        MPI_Wait(&my_request1, &status);
    }
}
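Both snippets call a multiplyMatrix(a, b, c, n) helper that is not shown above. For Fox's algorithm to give the right result it has to accumulate into c (and c has to start zeroed before the stage loop). A minimal sketch under that assumption, not necessarily the exact routine used here:

// Assumed local block multiply: accumulates c += a * b for n-by-n blocks
// stored in row-major order.
void multiplyMatrix(double *a, double *b, double *c, int n) {
    int i, j, k;
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            for (k = 0; k < n; k++)
                c[i*n + j] += a[i*n + k] * b[k*n + j];
}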
Method two:
// Fox's algorithm, method 2: copy b into b_temp so the local multiply can run on
// the copy while b itself is being shifted
for (stage = 0; stage < q; stage++) {
    // copy a into a_temp and broadcast a_temp from the selected process to the
    // other processes in its row
    for (i = 0; i < n_local*n_local; i++)
        a_temp[i] = a[i];
    if (stage == 0) {
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        multiplyMatrix(a_temp, b, c, n_local);
        // shift b up one row in place; MPI_Sendrecv_replace is used here because
        // posting a send and a receive on the same buffer is not allowed
        MPI_Sendrecv_replace(b, n_local*n_local, MPI_DOUBLE, nbrs[UP], 111,
                             nbrs[DOWN], 111, grid_comm, &status);
    }
    if (stage > 0) {
        // shift the b values of all processes: send the copy in b_temp and receive
        // the new block into b while the local multiply runs on b_temp
        memcpy(b_temp, b, n_local*n_local*sizeof(double));
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        MPI_Isend(b_temp, n_local*n_local, MPI_DOUBLE, nbrs[UP],   111, grid_comm, &my_request1);
        MPI_Irecv(b,      n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
        multiplyMatrix(a_temp, b_temp, c, n_local);
        MPI_Wait(&my_request2, &status);
        MPI_Wait(&my_request1, &status);
    }
}
Both methods seem to work, but as I said, I'm new to MPI, so if you have any comments or suggestions, please share them.
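For reference, the snippets also rely on setup that is not shown: a q x q Cartesian grid communicator grid_comm, a per-row communicator row_comm, the nbrs[UP]/nbrs[DOWN] neighbours, and rowID. The sketch below shows one way that state can be created after MPI_Init; it is only an assumed illustration for context, not the exact code behind the snippets, and the UP/DOWN direction mapping in particular is a guess.

// Assumed grid setup for the snippets above; meant to run after MPI_Init in the
// same program, with q already known. Variable names mirror the ones used above.
int dims[2]    = {q, q};   // q x q process grid
int periods[2] = {1, 1};   // periodic, so the shift of b wraps around
int coords[2], grid_rank, nbrs[2];
enum { UP = 0, DOWN = 1 };
MPI_Comm grid_comm, row_comm;

MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &grid_comm);
MPI_Comm_rank(grid_comm, &grid_rank);
MPI_Cart_coords(grid_comm, grid_rank, 2, coords);
int rowID = coords[0];

// neighbours one row above (UP) and below (DOWN) in the grid; with a +1 shift in
// direction 0 the returned "source" is the row above and the "dest" is the row below
MPI_Cart_shift(grid_comm, 0, 1, &nbrs[UP], &nbrs[DOWN]);

// one communicator per grid row, used for the broadcast of a_temp
MPI_Comm_split(grid_comm, rowID, coords[1], &row_comm);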