
I have a simple program in C++/MPI (MPICH2) that sends an array of doubles. If the size of the array exceeds 9000, the program hangs during the call to MPI_Send. If the array is smaller than 9000 (8000, for example), the program works fine. The source code is below:

Main file

#include <mpi.h>
#include <iostream>
#include <cstdlib>
// the Cube class is defined in the second listing below

using namespace std;

Cube** cubes;
int cubesLen;

double* InitVector(int N) {
   double* x = new double[N];
   for (int i = 0; i < N; i++) {
       x[i] = i + 1;
   }
   return x;
}

void CreateCubes() {
    cubes = new Cube*[12];
    cubesLen = 12;
    for (int i = 0; i < 12; i++) {
       cubes[i] = new Cube(9000);
    }
}

void SendSimpleData(int size, int rank) {
    Cube* cube = cubes[0];
    int nodeDest = rank + 1;
    if (nodeDest > size - 1) {
        nodeDest = 1;
    }

    double* coefImOut = (double *) malloc(sizeof (double)*cube->coefficentsImLength);
    cout << "Before send" << endl;
    int count = cube->coefficentsImLength;
    MPI_Send(coefImOut, count, MPI_DOUBLE, nodeDest, 0, MPI_COMM_WORLD);
    cout << "After send" << endl;
    free(coefImOut);

    MPI_Status status;
    double *coefIm = (double *) malloc(sizeof(double)*count);

    int nodeFrom = rank - 1;
    if (nodeFrom < 1) {
        nodeFrom = size - 1;
    }

    MPI_Recv(coefIm, count, MPI_DOUBLE, nodeFrom, 0, MPI_COMM_WORLD, &status);
    free(coefIm);
}

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    CreateCubes();

    if (rank != root) {
         SendSimpleData(size, rank);
    }

    MPI_Finalize();
    return 0;
}

The Cube class

 class Cube {
 public:
    Cube(int size);
    Cube(const Cube& orig);
    virtual ~Cube();

    int Id() { return id; } 
    void Id(int id) { this->id = id; }

    int coefficentsImLength;
    double* coefficentsIm;

private:
    int id;
};

Cube::Cube(int size) {
    this->coefficentsImLength = size;

    coefficentsIm = new double[size];
    for (int i = 0; i < size; i++) {
        coefficentsIm[i] = 1;
    }
}

Cube::Cube(const Cube& orig) {
}

Cube::~Cube() {
    delete[] coefficentsIm;
}

The program is run on 4 processes:

mpiexec -n 4 ./myApp1

Any ideas?


1 Answer


The details of the Cube class aren't relevant here: consider a simpler version

#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != root) {
        int nodeDest = rank + 1;
        if (nodeDest > size - 1) {
            nodeDest = 1;
        }
        int nodeFrom = rank - 1;
        if (nodeFrom < 1) {
            nodeFrom = size - 1;
        }

        MPI_Status status;
        int *data = new int[datasize];
        for (int i=0; i<datasize; i++)
            data[i] = rank;

        cout << "Before send" << endl;
        MPI_Send(data, datasize, MPI_INT, nodeDest, 0, MPI_COMM_WORLD);
        cout << "After send" << endl;
        MPI_Recv(data, datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &status);

        delete [] data;

    }

    MPI_Finalize();
    return 0;
}

Running this gives:

$ mpirun -np 4 ./send 1
Before send
After send
Before send
After send
Before send
After send
$ mpirun -np 4 ./send 65000
Before send
Before send
Before send

If you look at the message-queue window in DDT, you'll see that everyone is sending, no one is receiving, and you have a classic deadlock.

The semantics of MPI_Send are, oddly, not tightly defined, but it is allowed to block until "the receive has been posted". MPI_Ssend is clearer in this respect; it will always block until the receive has been posted. See here for details on the different send modes.

The reason it works for smaller messages is an accident of the implementation: for messages that are "small enough" (in your case, apparently under 64 kB), your MPI_Send implementation uses an "eager send" protocol and does not block on the receive; for larger messages, where it isn't necessarily safe just to keep a buffered copy of the message sitting in memory, Send waits for the matching receive (which it is always allowed to do anyway). That would also explain the threshold you observed: 9000 doubles is 72,000 bytes, just over 64 kB, while 8000 doubles is 64,000 bytes and still fits under the eager limit.
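A quick way to check whether code is silently relying on that eager buffering (a sketch of my own under the assumptions of the simplified example above, not code from the original post) is to swap MPI_Send for MPI_Ssend, which never buffers eagerly; the ring then hangs for any message size, confirming that the send/receive ordering itself is the problem:

// Sketch only (not from the original post): the same ring as the simplified
// example above, but using MPI_Ssend, which does not complete until a matching
// receive has been posted. This hangs for *any* datasize, showing that the
// deadlock comes from the ordering, not from the message size.
#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != root) {
        int nodeDest = rank + 1;
        if (nodeDest > size - 1) {
            nodeDest = 1;
        }
        int nodeFrom = rank - 1;
        if (nodeFrom < 1) {
            nodeFrom = size - 1;
        }

        MPI_Status status;
        int *data = new int[datasize];
        for (int i=0; i<datasize; i++)
            data[i] = rank;

        cout << "Before ssend" << endl;
        // Synchronous send: blocks until the destination posts its receive,
        // so every non-root rank stalls here and never reaches MPI_Recv.
        MPI_Ssend(data, datasize, MPI_INT, nodeDest, 0, MPI_COMM_WORLD);
        cout << "After ssend" << endl;
        MPI_Recv(data, datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &status);

        delete [] data;
    }

    MPI_Finalize();
    return 0;
}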

There are several things you could do to avoid this; you just have to make sure that not everyone is calling a blocking MPI_Send at the same time. You could (say) have even-ranked processors send first and then receive, while odd-ranked processors receive first and then send. You could use non-blocking communication (Isend/Irecv/Waitall; a sketch is included at the end of this answer). But the simplest solution in this case is to use MPI_Sendrecv, which is a blocking (send + receive) rather than a blocking send plus a blocking receive. The send and the receive execute concurrently, and the function blocks until both are complete. So this works:

#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != root) {
        int nodeDest = rank + 1;
        if (nodeDest > size - 1) {
            nodeDest = 1;
        }
        int nodeFrom = rank - 1;
        if (nodeFrom < 1) {
            nodeFrom = size - 1;
        }

        MPI_Status status;
        int *outdata = new int[datasize];
        int *indata  = new int[datasize];
        for (int i=0; i<datasize; i++)
            outdata[i] = rank;

        cout << "Before sendrecv" << endl;
        MPI_Sendrecv(outdata, datasize, MPI_INT, nodeDest, 0,
                     indata, datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &status);
        cout << "After sendrecv" << endl;

        delete [] outdata;
        delete [] indata;
    }

    MPI_Finalize();
    return 0;
}

Running gives:

$ mpirun -np 4 ./send 65000
Before sendrecv
Before sendrecv
Before sendrecv
After sendrecv
After sendrecv
After sendrecv
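
For completeness, here is a sketch of the non-blocking alternative mentioned above (my own illustration under the same assumptions as the examples in this answer, not code from the original post): each rank posts an MPI_Irecv and an MPI_Isend and then waits on both with MPI_Waitall, so no rank ever sits in a blocking send while its neighbour does the same.

// Sketch of the Isend/Irecv/Waitall variant (not code from the original post).
// Both operations are started immediately and MPI_Waitall blocks until the
// pair completes, so the ring cannot deadlock regardless of message size.
#include <mpi.h>
#include <iostream>
#include <cstdlib>

using namespace std;

int main(int argc, char *argv[]) {
    int size, rank;
    const int root = 0;

    int datasize = atoi(argv[1]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != root) {
        int nodeDest = rank + 1;
        if (nodeDest > size - 1) {
            nodeDest = 1;
        }
        int nodeFrom = rank - 1;
        if (nodeFrom < 1) {
            nodeFrom = size - 1;
        }

        int *outdata = new int[datasize];
        int *indata  = new int[datasize];
        for (int i=0; i<datasize; i++)
            outdata[i] = rank;

        MPI_Request reqs[2];
        MPI_Status  stats[2];

        cout << "Before isend/irecv" << endl;
        // Post the receive first, then the send; neither call blocks.
        MPI_Irecv(indata,  datasize, MPI_INT, nodeFrom, 0, MPI_COMM_WORLD, &reqs[0]);
        MPI_Isend(outdata, datasize, MPI_INT, nodeDest, 0, MPI_COMM_WORLD, &reqs[1]);
        // Wait for both the send and the receive to finish.
        MPI_Waitall(2, reqs, stats);
        cout << "After waitall" << endl;

        delete [] outdata;
        delete [] indata;
    }

    MPI_Finalize();
    return 0;
}

Run the same way (e.g. mpirun -np 4 ./send 65000) and it should print all three "After waitall" lines instead of hanging.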
Answered 2013-04-05T15:22:10.847