I am learning MPI-2 and trying to implement my first simple piece of functionality with MPI-2 one-sided communication: have process 0 host an array of fixed size data_size.
Each process (including 0) generates an array and compares it with the hosted one: if the first element of the generated array is smaller than the first element of the hosted array, the hosted array is replaced with the generated one. In code:
vector<int> v1 = {rank, rank+1, rank+2};
v = get_vec(vec);
if (v1[0] < v[0])
    put_vec(vec, v1);
The complete code is at the bottom. Naturally, I expect that of all the generated arrays, the one with the smallest head element should end up in the host array when the program finishes, since the smallest array ([0,1,2] in this example) replaces the others and is never replaced itself.
However, on some (rare) occasions I get output like this:
$ mpiexec.exe -n 4 a.exe
#0 assigns v1 {0 ...} to host v {2 ...}
#1 assigns v1 {1 ...} to host v {2 ...}
1 2 3
This seems to indicate that two assignments to the host data took place concurrently. I figure I must have misunderstood the lock/unlock synchronization calls in get_vec/put_vec, or made some obvious mistake elsewhere. Could someone explain how I should fix my code to get the expected output?
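My own guess is that the Get and the Put need to happen inside a single exclusive lock epoch rather than in two separate ones. A rough, untested sketch of what I mean is below; it relies on MPI_Win_flush, which as far as I understand is an MPI-3 call that completes the pending Get so its result can be examined while the lock is still held:

vector<int> v(vec->data_size);
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, vec->hostrank, 0, vec->win);
MPI_Get(&v.front(), vec->data_size, MPI_INT, vec->hostrank,
        0, vec->data_size, MPI_INT, vec->win);
MPI_Win_flush(vec->hostrank, vec->win); // MPI-3: locally complete the Get
if (v1[0] < v[0])                       // compare while still holding the lock
    MPI_Put(&v1.front(), vec->data_size, MPI_INT, vec->hostrank,
            0, vec->data_size, MPI_INT, vec->win);
MPI_Win_unlock(vec->hostrank, vec->win);

But I do not know whether this is the intended way to make the compare-and-replace atomic, or whether it is even possible with MPI-2 alone.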
Thanks in advance.
The complete code, which compiles with g++ -std=c++11 test.cpp -lmpi:
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread>
#include <chrono>
#include <iostream>
#include <vector>
using namespace std;
struct mpi_vector_t {
    MPI_Win win;
    int hostrank;  // id of the process that hosts the data exposed to all processes
    int rank;      // process id
    int size;      // number of processes
    int *data;
    int data_size;
};
struct mpi_vector_t *create_vec(int hostrank, std::vector<int> v) {
    struct mpi_vector_t *vec;

    vec = (struct mpi_vector_t *)malloc(sizeof(struct mpi_vector_t));
    vec->hostrank = hostrank;
    vec->data_size = v.size();
    MPI_Comm_rank(MPI_COMM_WORLD, &(vec->rank));
    MPI_Comm_size(MPI_COMM_WORLD, &(vec->size));

    if (vec->rank == hostrank) {
        // the host allocates the exposed memory and fills it with the initial values
        MPI_Alloc_mem(vec->data_size * sizeof(int), MPI_INFO_NULL, &(vec->data));
        for (int i = 0; i < vec->data_size; i++) vec->data[i] = v[i];
        MPI_Win_create(vec->data, vec->data_size * sizeof(int), sizeof(int),
                       MPI_INFO_NULL, MPI_COMM_WORLD, &(vec->win));
    } else {
        // the other ranks expose an empty window
        vec->data = NULL;
        MPI_Win_create(vec->data, 0, 1,
                       MPI_INFO_NULL, MPI_COMM_WORLD, &(vec->win));
    }
    return vec;
}
void delete_vec(struct mpi_vector_t **vec) {
    if ((*vec)->rank == (*vec)->hostrank) {
        MPI_Free_mem((*vec)->data);
    }
    MPI_Win_free(&((*vec)->win));
    free(*vec);
    *vec = NULL;
}
std::vector<int> get_vec(struct mpi_vector_t *vec) {
    vector<int> ret(vec->data_size);
    MPI_Win_lock(MPI_LOCK_SHARED, vec->hostrank, 0, vec->win);
    MPI_Get(&ret.front(), vec->data_size, MPI_INT, vec->hostrank,
            0, vec->data_size, MPI_INT, vec->win);
    MPI_Win_unlock(vec->hostrank, vec->win);
    return ret;
}
void put_vec(struct mpi_vector_t *vec, std::vector<int> v) {
    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, vec->hostrank, 0, vec->win);
    MPI_Put(&v.front(), vec->data_size, MPI_INT, vec->hostrank,
            0, vec->data_size, MPI_INT, vec->win);
    MPI_Win_unlock(vec->hostrank, vec->win);
}
void print_vec(struct mpi_vector_t *vec) {
    if (vec->rank == vec->hostrank) {
        for (int i = 0; i < vec->data_size; i++) {
            printf("%2d ", vec->data[i]);
        }
        puts("");
    }
}
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    struct mpi_vector_t *vec;
    int rank;

    vector<int> v = {2, 3, 1};
    vec = create_vec(0, v);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    for (int itest = 0; itest < 2; itest++) {
        vector<int> v1 = {rank, rank + 1, rank + 2}; // some generated data
        v = get_vec(vec);
        if (v1[0] < v[0]) {
            cout << "#" << rank << " assigns v1 {" << v1[0] <<
                    " ...} to host v {" << v[0] << " ...}" << endl;
            put_vec(vec, v1);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
    print_vec(vec);
    delete_vec(&vec);
    MPI_Finalize();
    return 0;
}