
This is the sequential code I am trying to parallelize in CUDA

/*
    Sequential (Single Thread) APSP on CPU.
*/
void floyd_sequential(int *mat, const size_t N)
{
    for(int k = 0; k < N; k ++)
        for(int i = 0; i < N; i ++)
            for(int j = 0; j < N; j ++)
            {
                int i0 = i*N + j;
                int i1 = i*N + k;
                int i2 = k*N + j;
                if(mat[i1] != -1 && mat[i2] != -1)
                    mat[i0] = (mat[i0] != -1 && mat[i0] < mat[i1] + mat[i2]) ?
                      mat[i0] : (mat[i1] + mat[i2]);
            }
}

This is my CUDA implementation

// ParallelComputing.cpp : Defines the entry point for the console application.
//

#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>


#define DIMENSION 10;
__global__ void gpu_Floyd(int *result, int N)
{
    int j,k;
    int Row = blockIdx.y * blockDim.y + threadIdx.y;

    for(k = 0; k < N; k++)
    {
        for(j = 0; j < N; j++)
        {
            int i0 = Row * N + j;  
            int i1 = Row * N + k;
            int i2 = k * N + j;
            if(result[i0] != -1 && result[i2] != -1)
                    result[i0] = (result[i0] != -1 && result[i0] < result[i1] + result[i2]) ?
                      result[i0] : (result[i1] + result[i2]);
            __syncthreads();
        }
    }
}

void GenMatrix(int *mat, const size_t N)
{
    for(int i = 0; i < N*N; i ++)
        mat[i] = rand()%32 - 1;

}

bool CmpArray(const int *l, const int *r, const size_t eleNum)
{
    for(int i = 0; i < eleNum; i ++)
        if(l[i] != r[i])
        {
            printf("ERROR: l[%d] = %d, r[%d] = %d\n", i, l[i], i, r[i]);
            return false;
        }
    return true;
}

int main(int argc, char **argv)
{

// generate a random matrix.
size_t N = 10;
int *mat = (int*)malloc(sizeof(int)*N*N);
GenMatrix(mat, N);

// compute the reference result.
int *ref = (int*)malloc(sizeof(int)*N*N);
memcpy(ref, mat, sizeof(int)*N*N);
Floyd_sequential(ref, N);


//CUDA Portion
int Grid_Dim_x = 1, Grid_Dim_y = 1;
int noThreads_x, noThreads_y;
int *result = (int*)malloc(sizeof(int)*N*N);
memcpy(result, mat, sizeof(int)*N*N);
int *d_result;

// compute your results

cudaMalloc((void **)&d_result, N*N);

cudaMemcpy(result, N * N, cudaMemcpyHostToDevice);
gpu_Floyd<<<1024, 256>>>(d_result, N);
cudaMemcpy(result, d_result, cudaMemcpyDeviceToHost);

// compare your result with reference result
if(CmpArray(result, ref, N*N))
    printf("The matrix matches.\n");
else
    printf("The matrix do not match.\n");

free(ref);
free(result);
cudaFree(d_result);
}

However, my output always shows that the matrices do not match.

I understand that in CUDA we usually try to map each element of the matrix to a thread. However, I am trying to explore the possibility of mapping each row of the matrix to a thread instead.


2 Answers


As has already been mentioned, the GPU code you have provided does not compile, so I'm curious how you observed that your output matrices do not match. Here are some of the problems with your code:

  • cudaMalloc, just like malloc, allocates bytes, so this is not correct:

    cudaMalloc((void **)&d_result, N*N);
    

Instead, you want this:

    cudaMalloc((void **)&d_result, N*N*sizeof(int));
  • Likewise, cudaMemcpy, just like memcpy, operates on bytes, and furthermore cudaMemcpy takes 4 arguments, so this is not correct:

    cudaMemcpy(result, N * N, cudaMemcpyHostToDevice);
    

Instead, you probably want this:

    cudaMemcpy(d_result, result, N * N*sizeof(int), cudaMemcpyHostToDevice);

Your other cudaMemcpy line needs a similar fix.

  • I would also recommend doing proper CUDA error checking; a sketch of the corrected host code, including such error checking, follows this list.
  • Your kernel is written as if it expects a 2D array of threads, or at least a 1D array in y, whereas you are launching a 1D grid in x:

    gpu_Floyd<<<1024, 256>>>(d_result, N);
    

As a result, all of your kernel built-in variables in y will always be 1 or 0, and this line of code:

        int Row = blockIdx.y * blockDim.y + threadIdx.y;

will evaluate to zero for every thread in your 1D grid in x.

  • Your GPU kernel is placing its results in the same matrix as the input data. For sequential code this may or may not matter, but for code that is intended to run in parallel it can often lead to race conditions, because the order of operations (i.e. the order of thread execution) is largely undefined.
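
Putting the fixes above together, here is a minimal sketch of how the corrected host-side portion could look. It assumes the kernel's row index is changed to int Row = blockIdx.x * blockDim.x + threadIdx.x; with an early return when Row >= N, so that the 1D grid in x matches the indexing; the cudaCheck macro is a hypothetical helper standing in for proper error checking and is not part of the original code:

    #define cudaCheck(call)                                                    \
        do {                                                                   \
            cudaError_t err_ = (call);                                         \
            if (err_ != cudaSuccess) {                                         \
                fprintf(stderr, "CUDA error: %s at %s:%d\n",                   \
                        cudaGetErrorString(err_), __FILE__, __LINE__);         \
                exit(EXIT_FAILURE);                                            \
            }                                                                  \
        } while (0)

    int *d_result;
    cudaCheck(cudaMalloc((void **)&d_result, N * N * sizeof(int)));                       // size in bytes
    cudaCheck(cudaMemcpy(d_result, result, N * N * sizeof(int), cudaMemcpyHostToDevice)); // 4 arguments, size in bytes

    int threadsPerBlock = 256;
    int blocksPerGrid   = ((int)N + threadsPerBlock - 1) / threadsPerBlock;  // enough x-threads to cover the N rows
    gpu_Floyd<<<blocksPerGrid, threadsPerBlock>>>(d_result, N);
    cudaCheck(cudaPeekAtLastError());     // catch launch configuration errors
    cudaCheck(cudaDeviceSynchronize());   // catch errors raised during kernel execution

    cudaCheck(cudaMemcpy(result, d_result, N * N * sizeof(int), cudaMemcpyDeviceToHost));

Note that this only addresses the API usage and the launch configuration; the in-place update discussed in the last bullet still has to be dealt with, for example by launching one kernel per k iteration as is done in the answer below.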
Answered 2013-11-11T16:50:48.953

Below you will find a canonical, simple implementation of the Floyd-Warshall algorithm in CUDA.

The CUDA code is accompanied by a sequential implementation, and both are based on the simplifying assumption that the edges are non-negative. In both cases the full minimum-distance paths are also reconstructed. Despite the simplifying assumption, it should still be possible to grasp the relevant parallelization idea: a two-dimensional thread grid is exploited, with each thread along x assigned to a matrix column and each block along y assigned to a matrix row. In this way, the k-th column is loaded into shared memory by the threadIdx.x == 0 thread of each block.

// --- Assumption: graph with positive edges

#include <stdio.h>
#include <string>
#include <map>
#include <iostream>
#include <fstream>
#include <climits>                          // --- For INT_MAX

#include "Utilities.cuh"

#define BLOCKSIZE   256

using namespace std;

map<string, int> nameToNum;                 // --- names of vertices
map<string, map<string, int>> weightMap;    // --- weights of edges 

/************************/
/* READ GRAPH FROM FILE */
/************************/
int *readGraphFromFile(int &N, const char *fileName) {

    string vertex1, vertex2;                
    ifstream graphFile;
    int currentWeight;          
    N = 0;                                              // --- Init the number of found vertices
    graphFile.open(fileName);                           // --- Open the graph file

    graphFile >> vertex1;                               // --- Read first vertex
    while(vertex1 != "--END--") {                       // --- Loop until the end-of-file marker is found
        graphFile >> vertex2;                           // --- Read second vertex
        graphFile >> currentWeight;                     // --- Read weight between first and second vertex
        if (nameToNum.count(vertex1) == 0) {            // --- If vertex has not yet been added ...
            nameToNum[vertex1] = N;                     //     assign a progressive number to the vertex
            weightMap[vertex1][vertex1] = 0;            //     assign a zero weight to the "self-edge"
            N++;                                        // --- Update the found number of vertices
        }
        if (nameToNum.count(vertex2) == 0) {
            nameToNum[vertex2] = N;
            weightMap[vertex2][vertex2] = 0;
            N++;
        }
        weightMap[vertex1][vertex2] = currentWeight;    // --- Update weight between vertices 1 and 2
        graphFile >> vertex1;
    }    
    graphFile.close();                                  // --- Close the graph file

    // --- Construct the array
    int *weightMatrix = (int*) malloc(N * N * sizeof(int));
    // --- Loop over all the vertex couples in the weights matrix
    for (int ii = 0; ii < N; ii++)                      
        for (int jj = 0; jj < N; jj++)
            weightMatrix[ii * N + jj] = INT_MAX / 2;    // --- Init the weights matrix elements to infinity
    map<string, int>::iterator i, j;
    // --- Loop over all the vertex couples in the map
    //     (*i).first and (*j).first are the vertex names in the map, while (*i).second and (*j).second are their corresponding indices
    for (i = nameToNum.begin(); i != nameToNum.end(); ++i)
        for (j = nameToNum.begin(); j != nameToNum.end(); ++j) {
            // --- If there is a connection between vertices (*i).first and (*j).first, then update the weight matrix
            if (weightMap[(*i).first].count((*j).first) != 0) 
                weightMatrix[N * (*i).second + (*j).second] = weightMap[(*i).first][(*j).first];
        }
    return weightMatrix;
}

/************************************/
/* PRINT MINIMUM DISTANCES FUNCTION */
/************************************/
void printMinimumDistances(int N, int *a) {

    map<string, int>::iterator i;

    // --- Prints all the node labels at the first row
    for (i = nameToNum.begin(); i != nameToNum.end(); ++i) printf("\t%s", i->first.c_str());

    printf("\n");

    i = nameToNum.begin();

    // --- Loop over the rows
    for (int p = 0; p < N; p++) {

        printf("%s\t", i -> first.c_str());

        // --- Loop over the columns
        for (int q = 0; q < N; q++) {
            int dd =  a[p * N + q];
            if (dd != INT_MAX / 2) printf("%d\t", dd);
            else printf("--\t");
        }

        printf("\n");

        i++;
    }
}

void printPathRecursive(int row, int col, int *minimumDistances, int *path, int N) {
    map<string, int>::iterator i = nameToNum.begin();
    map<string, int>::iterator j = nameToNum.begin();
    if (row == col) {advance(i, row); printf("%s\t", i -> first.c_str()); }
    else {
        if (path[row * N + col] == INT_MAX / 2) printf("%d %d %d: No path exists\t\n", minimumDistances[row * N + col], row, col);
        else {
            printPathRecursive(row, path[row * N + col], minimumDistances, path, N);
            advance(j, col);
            printf("%s\t", j -> first.c_str());
        }
    }
}

void printPath(int N, int *minimumDistances, int *path) {

    map<string, int>::iterator i;
    map<string, int>::iterator j;

    // --- Loop over the rows
    i = nameToNum.begin();
    for (int p = 0; p < N; p++) {

        // --- Loop over the columns
        j = nameToNum.begin();
        for (int q = 0; q < N; q++) {
            printf("From %s to %s\t", i -> first.c_str(), j -> first.c_str());
            printPathRecursive(p, q, minimumDistances, path, N);
            printf("\n");
            j++;
        }

        i++;
    }
}

/**********************/
/* FLOYD-WARSHALL CPU */
/**********************/
void h_FloydWarshall(int *h_graphMinimumDistances, int *h_graphPath, const int N) {
    for (int k = 0; k < N; k++)
        for (int row = 0; row < N; row++)
            for (int col = 0; col < N; col++) {
                if (h_graphMinimumDistances[row * N + col] > (h_graphMinimumDistances[row * N + k] + h_graphMinimumDistances[k * N + col])) {
                    h_graphMinimumDistances[row * N + col] = (h_graphMinimumDistances[row * N + k] + h_graphMinimumDistances[k * N + col]);
                    h_graphPath[row * N + col] = h_graphPath[k * N + col];
                }
            }
}

/*************************/
/* FLOYD-WARSHALL KERNEL */
/*************************/
__global__ void d_FloydWarshall(int k, int *d_graphMinimumDistances, int *d_graphPath, int N) {

    int col = blockIdx.x * blockDim.x + threadIdx.x;    // --- Each thread along x is assigned to a matrix column
    int row = blockIdx.y;                               // --- Each block along y is assigned to a matrix row

    if (col >= N) return;

    int arrayIndex = N * row + col;             

    // --- Each block loads its element (row, k) of the k-th column into shared memory
    //     (collectively, the blocks load the entire k-th column)
    __shared__ int d_graphMinimumDistances_row_k;          
    if(threadIdx.x == 0) d_graphMinimumDistances_row_k = d_graphMinimumDistances[N * row + k];
    __syncthreads();

    if (d_graphMinimumDistances_row_k == INT_MAX / 2)   // --- If element (row, k) = infinity, no update is needed
        return;

    int d_graphMinimumDistances_k_col = d_graphMinimumDistances[k * N + col];
    if (d_graphMinimumDistances_k_col == INT_MAX / 2)   // --- If element (k, col) = infinity, no update is needed
        return;

    int candidateBetterDistance = d_graphMinimumDistances_row_k + d_graphMinimumDistances_k_col;
    if (candidateBetterDistance < d_graphMinimumDistances[arrayIndex]) {
        d_graphMinimumDistances[arrayIndex] = candidateBetterDistance;
        d_graphPath[arrayIndex] = d_graphPath[k * N + col];
    }
}

/********/
/* MAIN */
/********/
int main() {

    int N = 0;                  // --- Number of vertices

    // --- Read graph array from file
    int *h_graphArray = readGraphFromFile(N, "graph2.txt");     
    printf("\n******************\n");
    printf("* Original graph *\n");
    printf("******************\n");
    printMinimumDistances(N, h_graphArray);

    // --- Floyd-Warshall on CPU
    int *h_graphMinimumDistances = (int *) malloc(N * N * sizeof(int));
    int *h_graphPath             = (int *) malloc(N * N * sizeof(int));
    memcpy(h_graphMinimumDistances, h_graphArray, N * N * sizeof(int));
    for (int k = 0; k < N; k++) 
        for (int l = 0; l < N; l++) 
            if (h_graphArray[k * N + l] == INT_MAX / 2) h_graphPath[k * N + l] = INT_MAX / 2;
            else h_graphPath[k * N + l] = k;

    h_FloydWarshall(h_graphMinimumDistances, h_graphPath, N);
    printf("\n*************************\n");
    printf("* CPU result: distances *\n");
    printf("*************************\n");
    printMinimumDistances(N, h_graphMinimumDistances);
    printf("\n********************\n");
    printf("* CPU result: path *\n");
    printf("********************\n");
    printPath(N, h_graphMinimumDistances, h_graphPath);

    // --- Graph array device allocation and host-device memory transfer
    int *d_graphMinimumDistances;   gpuErrchk(cudaMalloc(&d_graphMinimumDistances, N * N * sizeof(int)));
    gpuErrchk(cudaMemcpy(d_graphMinimumDistances, h_graphArray, N * N * sizeof(int), cudaMemcpyHostToDevice));
    int *d_graphPath;               gpuErrchk(cudaMalloc(&d_graphPath, N * N * sizeof(int)));
    for (int k = 0; k < N; k++) 
        for (int l = 0; l < N; l++) 
            if (h_graphArray[k * N + l] == INT_MAX / 2) h_graphPath[k * N + l] = INT_MAX / 2;
            else h_graphPath[k * N + l] = k;
    gpuErrchk(cudaMemcpy(d_graphPath, h_graphPath, N * N * sizeof(int), cudaMemcpyHostToDevice));

    // --- Iterations
    for (int k = 0; k < N; k++) {
        d_FloydWarshall <<<dim3(iDivUp(N, BLOCKSIZE), N), BLOCKSIZE>>>(k, d_graphMinimumDistances, d_graphPath, N);
#ifdef DEBUG
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
#endif
    }

    // --- Copy results back to the host
    gpuErrchk(cudaMemcpy(h_graphMinimumDistances, d_graphMinimumDistances, N * N * sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_graphPath, d_graphPath, N * N * sizeof(int), cudaMemcpyDeviceToHost));
    printf("\n**************\n");
    printf("* GPU result *\n");
    printf("**************\n");
    printMinimumDistances(N, h_graphMinimumDistances);
    printf("\n********************\n");
    printf("* GPU result: path *\n");
    printf("********************\n");
    printPath(N, h_graphMinimumDistances, h_graphPath);

}
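
The code above uses gpuErrchk and iDivUp from Utilities.cuh, which is not reproduced here. A minimal sketch of what those two helpers could look like (the actual header may differ) is:

// --- Sketch of the helpers assumed from Utilities.cuh
#include <cstdio>
#include <cstdlib>

// --- Number of blocks of size b needed to cover a elements (integer division rounded up)
inline int iDivUp(int a, int b) { return (a + b - 1) / b; }

// --- Abort with a readable message if a CUDA API call returns an error
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }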
Answered 2016-02-13T07:45:45.410