I am learning the MPI one-sided communication introduced in MPI-2/MPI-3 and came across this online course page on MPI_Accumulate:
MPI_Accumulate allows the caller to combine the data moved to the target process with data already present there, such as the accumulation of a sum at a target process. The same functionality could be achieved by using MPI_Get to retrieve the data (followed by synchronization), performing the sum operation at the caller, and then using MPI_Put to send the updated data back to the target process. Accumulate simplifies this messiness ...
However, only a limited set of operations (max, min, sum, product, etc.) may be used with MPI_Accumulate, and user-defined operations are not allowed. I would like to know how to implement the "messiness" mentioned above using MPI_Get, synchronization, a local operation, and MPI_Put. Is there any C/C++ tutorial or working code example?
Thanks.
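For reference, this is the kind of fence-based sequence I have in mind. It is only a minimal, untested sketch of my own understanding (not taken from the course page): a single int owned by rank 0, where each rank in turn gets the value, adds to it locally, and puts it back. The turn-taking avoids the read-modify-write race that MPI_Accumulate would otherwise handle atomically.

//minimal, untested sketch: emulate MPI_Accumulate(..., MPI_SUM, ...) on one int
//owned by rank 0 using fence (active-target) synchronization;
//every rank in the window's communicator must reach every fence
#include <mpi.h>
#include <stdio.h>
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int *base = NULL;
    MPI_Win win;
    if (rank == 0) { //rank 0 exposes one int, everyone else a zero-size window
        MPI_Alloc_mem(sizeof(int), MPI_INFO_NULL, &base);
        *base = 0;
        MPI_Win_create(base, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    } else {
        MPI_Win_create(NULL, 0, sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    }
    for (int turn = 0; turn < size; turn++) { //one updater per epoch, so no race
        int tmp = 0;
        MPI_Win_fence(0, win);
        if (rank == turn)
            MPI_Get(&tmp, 1, MPI_INT, 0, 0, 1, MPI_INT, win); //fetch current value
        MPI_Win_fence(0, win);                                //get completes here
        if (rank == turn) {
            tmp += rank + 1;                                  //arbitrary local op
            MPI_Put(&tmp, 1, MPI_INT, 0, 0, 1, MPI_INT, win); //write it back
        }
        MPI_Win_fence(0, win);                                //put completes here
    }
    if (rank == 0) printf("final value: %d\n", *base);        //expect size*(size+1)/2
    MPI_Win_free(&win);
    if (rank == 0) MPI_Free_mem(base);
    MPI_Finalize();
    return 0;
}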
To test this, I adapted a piece of code from this SO question, in which one-sided communication is used to create an integer counter that is kept synchronized across the MPI processes. The problem line that uses MPI_Accumulate is marked.
The code compiles as-is and returns in about 15 seconds. But when I replace MPI_Accumulate with the supposedly equivalent sequence of basic operations shown in the comment block after the problem line, the compiled program hangs indefinitely.
Can anyone help explain what went wrong, and what the correct way is to replace MPI_Accumulate in this situation?
P.S. I compiled the code with
g++ -std=c++11 -I.. mpistest.cpp -lmpi
and ran the binary with
mpiexec -n 4 a.exe
Code:
//adapted from https://stackoverflow.com/questions/4948788/
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread>
#include <chrono>
struct mpi_counter_t {
MPI_Win win;
int hostrank; //rank of the process that hosts the values exposed to all processes
int rank; //process id
int size; //number of processes
int val;
int *hostvals;
};
struct mpi_counter_t *create_counter(int hostrank) {
struct mpi_counter_t *count;
count = (struct mpi_counter_t *)malloc(sizeof(struct mpi_counter_t));
count->hostrank = hostrank;
MPI_Comm_rank(MPI_COMM_WORLD, &(count->rank));
MPI_Comm_size(MPI_COMM_WORLD, &(count->size));
if (count->rank == hostrank) {
MPI_Alloc_mem(count->size * sizeof(int), MPI_INFO_NULL, &(count->hostvals));
for (int i=0; i<count->size; i++) count->hostvals[i] = 0;
MPI_Win_create(count->hostvals, count->size * sizeof(int), sizeof(int),
MPI_INFO_NULL, MPI_COMM_WORLD, &(count->win));
}
else {
count->hostvals = NULL;
MPI_Win_create(count->hostvals, 0, 1,
MPI_INFO_NULL, MPI_COMM_WORLD, &(count->win));
}
count->val = 0;
return count;
}
int increment_counter(struct mpi_counter_t *count, int increment) {
int *vals = (int *)malloc( count->size * sizeof(int) );
int val;
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);
for (int i=0; i<count->size; i++) {
if (i == count->rank) {
MPI_Accumulate(&increment, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM,count->win); //Problem line: increment hostvals[i] on host
/* //Question: How to correctly replace the above MPI_Accumulate call with the following sequence? Currently, the following causes the program to hang.
MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
MPI_Win_fence(0,count->win);
vals[i] += increment;
MPI_Put(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
MPI_Win_fence(0,count->win);
//*/
} else {
MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
}
}
MPI_Win_unlock(0, count->win);
//do op part of MPI_Accumulate's work on count->rank
count->val += increment;
vals[count->rank] = count->val;
//return the sum of vals
val = 0;
for (int i=0; i<count->size; i++)
val += vals[i];
free(vals);
return val;
}
void delete_counter(struct mpi_counter_t **count) {
if ((*count)->rank == (*count)->hostrank) {
MPI_Free_mem((*count)->hostvals);
}
MPI_Win_free(&((*count)->win));
free((*count));
*count = NULL;
return;
}
void print_counter(struct mpi_counter_t *count) {
if (count->rank == count->hostrank) {
for (int i=0; i<count->size; i++) {
printf("%2d ", count->hostvals[i]);
}
puts("");
}
}
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
const int WORKITEMS=50;
struct mpi_counter_t *c;
int rank;
int result = 0;
c = create_counter(0);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
srand(rank);
while (result < WORKITEMS) {
result = increment_counter(c, 1);
if (result <= WORKITEMS) {
printf("%d working on item %d...\n", rank, result);
std::this_thread::sleep_for (std::chrono::seconds(rand()%2));
} else {
printf("%d done\n", rank);
}
}
MPI_Barrier(MPI_COMM_WORLD);
print_counter(c);
delete_counter(&c);
MPI_Finalize();
return 0;
}
One more question: should I be using MPI_Win_fence here instead of locks?
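Either way, this is how I currently understand a lock-based (passive-target) read-modify-write would have to look. Again, only a rough, untested sketch; it relies on MPI-3's MPI_Win_flush to complete the get inside the lock epoch, and win, disp and increment are placeholders for whatever the real code uses:

//untested sketch of a passive-target read-modify-write on one int at rank 0;
//MPI_Win_flush (MPI-3) completes the MPI_Get inside the lock epoch so the value
//can be used locally, and the exclusive lock keeps other origins out until the
//MPI_Put is made visible at MPI_Win_unlock
int tmp;
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
MPI_Get(&tmp, 1, MPI_INT, 0, disp, 1, MPI_INT, win);
MPI_Win_flush(0, win);  //get is now complete, tmp is usable
tmp += increment;       //local user-defined op
MPI_Put(&tmp, 1, MPI_INT, 0, disp, 1, MPI_INT, win);
MPI_Win_unlock(0, win); //put completes and the lock is released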
-- EDIT --
I changed increment_counter to use lock/unlock as shown below. The program now runs, but behaves strangely: in the final printout, the host rank has done all the work. Still confused.
int increment_counter(struct mpi_counter_t *count, int increment) {
int *vals = (int *)malloc( count->size * sizeof(int) );
int val;
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);
for (int i=0; i<count->size; i++) {
if (i == count->rank) {
//MPI_Accumulate(&increment, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM,count->win); //Problem line: increment hostvals[i] on host
///* //Question: How to correctly replace the above MPI_Accumulate call with the following sequence? It runs, but reports that rank 0 does all the work.
MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
MPI_Win_unlock(0, count->win);
vals[i] += increment;
MPI_Put(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);
//*/
} else {
MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
}
}
MPI_Win_unlock(0, count->win);
//do op part of MPI_Accumulate's work on count->rank
count->val += increment;
vals[count->rank] = count->val;
//return the sum of vals
val = 0;
for (int i=0; i<count->size; i++)
val += vals[i];
free(vals);
return val;
}