
I define a square matrix of size grid_size and do the work on its interior (grid_size-2), while sending the next-to-outermost edges to the other processes. I set up a toroidal topology, so every submatrix process can easily compute its neighbours. The rows (e.g. [1][1] up to [1][grid_size-2]) are sent correctly, but the columns (e.g. [1][1] up to [grid_size-2][1]) are not. I use MPI_Type_contiguous for the rows and MPI_Type_vector for the columns. As a test I send empty matrices (they are char matrices, so I initialize them to \0): the rows always arrive as 0, but the received columns differ at (semi-)random positions. What am I missing?

typedef char bool;
typedef bool **grid_t;

/* create a toroidal topology */
void cart_create(MPI_Comm *new_comm, int Proc_Root) {
    int reorder = 1; /* allows processes reordered for efficiency */
    int periods[2], dim_size[2];
    dim_size[0] = Proc_Root; /* rows */
    dim_size[1] = Proc_Root; /* columns */
    periods[0] = 1; /* rows periodic (each column forms a ring) */
    periods[1] = 1; /* columns periodic (each row forms a ring) */
    int comm_size;
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dim_size, periods, reorder, new_comm);
}

int main(int argc, char** argv) {

    /* ! MPI ! */
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int Num_of_Proc;
    MPI_Comm_size(MPI_COMM_WORLD, &Num_of_Proc);
    int Proc_Root = sqrt(Num_of_Proc);
    int Inner_Grid_Size = Num_of_Rows / Proc_Root; // size of the process's submatrix
    int Grid_Size = Inner_Grid_Size + 2; //grid size plus the ghost shells

    /* topology */
    MPI_Comm new_comm;
    cart_create(&new_comm, Proc_Root);

    /* allocate the grid */
    grid_t grid;
    create_grid(&grid, Grid_Size); // I fill it with 0
    grid_t grid2;
    create_empty_grid(&grid2, Grid_Size);
    grid_t new, old;

    bool *north_row = malloc(Inner_Grid_Size * sizeof *north_row);
    bool *south_row = malloc(Inner_Grid_Size * sizeof *south_row);
    bool *west_column = malloc(Inner_Grid_Size * sizeof *west_column);
    bool *east_column = malloc(Inner_Grid_Size * sizeof *east_column);
    // Works !
    MPI_Datatype rowtype;
    MPI_Type_contiguous(Inner_Grid_Size, MPI_CHAR, &rowtype); // MPI_C_BOOL
    MPI_Type_commit(&rowtype);
    // Where is the bug ?
    MPI_Datatype columntype;
    MPI_Type_vector(Inner_Grid_Size, 1, Grid_Size, MPI_CHAR, &columntype);
    MPI_Type_commit(&columntype);

    for (int k = 0; k < generations; k++) {
        if (k % 2) {
            old = grid2;
            new = grid;
        } else {
            old = grid;
            new = grid2;
        }
        MPI_Status status[16];
        MPI_Request reqs[16];
        MPI_Isend(&old[Inner_Grid_Size][1], 1, rowtype, neighboors_ranks[S],
                S, new_comm, &reqs[S]); //send to S
        MPI_Irecv(north_row, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[N],
                S, new_comm, &reqs[S + EIGHT]); //receive from N
        // above works
        // below not
        MPI_Isend(&old[1][1], 1, columntype, neighboors_ranks[W], W,
                new_comm, &reqs[W]); //send to W
        MPI_Irecv(east_column, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[E],
                W, new_comm, &reqs[W + EIGHT]); //receive from E
        MPI_Isend(&old[1][Inner_Grid_Size], 1, columntype, neighboors_ranks[E],
                E, new_comm, &reqs[E]); //send to E
        MPI_Irecv(west_column, Inner_Grid_Size, MPI_CHAR, neighboors_ranks[W],
                E, new_comm, &reqs[E + EIGHT]); //receive from W

        MPI_Waitall(EIGHT, reqs + EIGHT, status + EIGHT); //Wait receives
        if (rank == root)
            for (int p = 0; p < Inner_Grid_Size; p++) {
                printf("east[%d]=%d\n", p, east_column[p]); // should be 0 !?
                //  printf("north,%d\n", north_row[p]); // prints ok
                printf("west[%d]=%d\n", p, west_column[p]); // should be 0 !?
            }
        //...
    }
}

Edit: the allocation

void create_grid(grid_t *grid, int size) {
    srand(time(NULL) ^ (get_rank() << 16));
    if ((*grid = malloc(size * (sizeof **grid))) == NULL) return;
    for (int i = 0; i < size; ++i) {
        (*grid)[i] = malloc(size * (sizeof *((*grid)[i])));
        for (int j = 0; j < size; ++j) {
            (*grid)[i][j] = 0; /*was random */
        }
    }
}

/* the grid will be full of 0 */
void create_empty_grid(grid_t *grid, int size) {
    if ((*grid = malloc(size * (sizeof **grid))) == NULL) return;
    // the outer edges will be filled by the other processes
    for (int i = 0; i < size; ++i) {
        (*grid)[i] = malloc(size * (sizeof *((*grid)[i])));
        memset((*grid)[i], 0, sizeof (*(*grid)[i]) * size);
    }
}

void print_grid(grid_t grid, int start, int size) {
    for (int i = start; i < size; ++i) {
        for (int j = start; j < size; ++j) {
            if (grid[i][j]) {
                printf("@");
            } else {
                printf(".");
            }
        }
        printf("\n");
    }
    printf("\n");
}

1 Answer


This comes up often here when working with "multidimensional arrays" in C and MPI (see, for example, this question/answer and this one). It is not really an MPI issue; it is a C issue.

The standard way of allocating an array of arrays in C to get a multidimensional array does not give you one contiguous block of memory. Each row (that is, each separate malloc) is contiguous on its own, but the next row can sit anywhere else in memory.
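
For concreteness, here is a minimal, MPI-free sketch (not taken from the question; the names ragged, flat and data are illustrative) that prints where consecutive rows start under each allocation scheme:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int size = 8;

    /* array of arrays: every row is a separate malloc, so rows can land anywhere */
    char **ragged = malloc(size * sizeof *ragged);
    for (int i = 0; i < size; i++)
        ragged[i] = malloc(size * sizeof *ragged[i]);

    /* one contiguous block plus an array of row pointers into it */
    char *data = malloc((size_t) size * size * sizeof *data);
    char **flat = malloc(size * sizeof *flat);
    for (int i = 0; i < size; i++)
        flat[i] = &data[i * size];

    /* MPI_Type_vector(n, 1, size, MPI_CHAR, ...) assumes the element one row
       down lies exactly `size` chars after the current one; only the second
       layout guarantees that */
    printf("flat:   flat[1] - flat[0] = %td (always size = %d)\n",
           flat[1] - flat[0], size);
    printf("ragged: row 0 at %p, row 1 at %p (gap is allocator-dependent)\n",
           (void *) ragged[0], (void *) ragged[1]);

    for (int i = 0; i < size; i++) free(ragged[i]);
    free(ragged);
    free(data);
    free(flat);
    return 0;
}

The contiguous rows are guaranteed to be exactly size elements apart; the separately malloc'd rows are wherever the allocator happened to put them, which is the gap a stride-of-Grid_Size column type walks into.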

So the formula of skipping Grid_Size items to reach the next item in a column does not work (and, depending on the grid size, may well segfault). As in the answers linked above,

change the allocation to something like

data = malloc(size * size * sizeof(type));
grid = malloc(size * sizeof(type *));
for (int i = 0; i < size; i++)
    grid[i] = &(data[i * size]);

or any of the many variations of this that you will see. It gives you a single block of size*size elements of your type, with the grid[] array of row pointers pointing into it. You then free it with

free(grid[0]); /* or free(data): releases the single block of cells */
free(grid);    /* releases the array of row pointers */
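
Applied to the allocation routines in the question, a contiguous create_grid could look roughly like this (a sketch only: it keeps the question's bool and grid_t typedefs, zeroes every cell with calloc, and leaves out the srand call used for random fills):

/* allocate one zeroed block of size*size cells plus an array of row pointers,
   so that vertically adjacent cells are exactly `size` elements apart */
void create_grid(grid_t *grid, int size) {
    bool *data = calloc((size_t) size * size, sizeof *data);
    *grid = malloc(size * sizeof **grid);
    if (data == NULL || *grid == NULL) {
        free(data);
        free(*grid);
        *grid = NULL;
        return;
    }
    for (int i = 0; i < size; ++i)
        (*grid)[i] = &data[(size_t) i * size];
}

create_empty_grid needs the same treatment. With this layout the stride of Grid_Size passed to MPI_Type_vector(Inner_Grid_Size, 1, Grid_Size, MPI_CHAR, &columntype) matches the real distance between vertically adjacent cells, so &old[1][1] and &old[1][Inner_Grid_Size] genuinely describe the west and east columns, and the empty-grid test should receive all zeros. If the grids are freed anywhere, any per-row freeing has to be replaced by the two calls shown above.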
Answered 2013-07-14T00:31:50.933