
I am trying to understand how concurrent kernel execution works, so I wrote a simple program to experiment with it. The kernel populates a 2D array using 2 streams. With 1 stream and no concurrency I get the correct result; when I try 2 streams to get concurrency, I get wrong results. I believe the problem is either in the memory transfers or in the way I set up the kernel, as I am not sure I am doing either correctly, and the programming guide does not explain it well enough for me. For my purposes, the kernel needs to be called from Matlab.

As I understand it, the main program should:

  • Allocate pinned memory on the host
  • Allocate the memory a single stream needs on the GPU (2 streams = half of the total host memory)
  • Create the streams
  • Loop over the streams
  • Copy the memory for a single stream from host to device with cudaMemcpyAsync()
  • Execute the kernel for the stream
  • Copy the stream's memory back to the host with cudaMemcpyAsync()
    • I believe I am addressing the memory correctly for each stream by using an offset based on each stream's data size and the stream number (see the index sketch after this list).
  • Destroy the streams
  • Free the memory
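
For reference, this is a minimal sketch of the offset arithmetic I have in mind, written as a hypothetical standalone C++ snippet (no CUDA calls, just the index math; the names width, height, numberOfStreams and streamSize match the code below):

#include <cstdio>

int main()
{
    const int width = 512, height = 512, numberOfStreams = 2;
    const int fullSize   = height * width;
    const int streamSize = fullSize / numberOfStreams;   // elements handled by each stream

    for (int i = 0; i < numberOfStreams; i++)
    {
        const int offset = i * streamSize;               // element offset into the full array, not bytes
        std::printf("stream %d: elements %d..%d, %zu bytes\n",
                    i, offset, offset + streamSize - 1, sizeof(double) * streamSize);
    }
    return 0;
}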

Here is the code I am trying to use.

concurrentKernel.cpp

__global__ void concurrentKernel(int const width, 
                                  int const streamIdx,
                                  double *array)
 {
     int thread = (blockIdx.x * blockDim.x) + threadIdx.x;   // global thread index

     for (int i = 0; i < width; i ++)
     {
        array[thread*width+i] = thread+i*width+1;
//         array[thread*width+i+streamIdx] = thread+i*width+streamIdx*width/2;
     }

 }

concurrentMexFunction.cu

#include <stdio.h>
#include <math.h>
#include "mex.h"

/* Kernel function */
#include "concurrentKernel.cpp"


void mexFunction(int        nlhs,
                 mxArray    *plhs[],
                 int        nrhs,
                 mxArray    *prhs[])
{

    int const numberOfStreams = 2; // set number of streams to use here.
    cudaError_t cudaError;
    int offset;

    int width, height, fullSize, streamSize;
    width = 512;
    height = 512;
    fullSize = height*width;
    streamSize = (int)(fullSize/numberOfStreams);
    mexPrintf("fullSize: %d, streamSize: %d\n",fullSize, streamSize);

    /* Return the populated array */
    double *returnedArray;
    plhs[0] = mxCreateDoubleMatrix(height, width, mxREAL);
    returnedArray = mxGetPr(plhs[0]);

    cudaStream_t stream[numberOfStreams];
    for (int i = 0; i < numberOfStreams; i++)
    {
        cudaStreamCreate(&stream[i]);    
    }

    /* host memory */
    double *hostArray;
    cudaError = cudaMallocHost(&hostArray,sizeof(double)*fullSize);    // full size of array.
    if (cudaError != cudaSuccess) {mexPrintf("hostArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            hostArray[i*width+j] = -1.0;
        }
    }

    /* device memory */
    double *deviceArray;
    cudaError = cudaMalloc( (void **)&deviceArray,sizeof(double)*streamSize);    // size of array for each stream.
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }


    for (int i = 0; i < numberOfStreams; i++)
    {
        offset = i;//*streamSize;
        mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

        cudaMemcpyAsync(deviceArray, hostArray+offset, sizeof(double)*streamSize, cudaMemcpyHostToDevice, stream[i]);
        if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

        concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray);

        cudaMemcpyAsync(returnedArray+offset, deviceArray, sizeof(double)*streamSize, cudaMemcpyDeviceToHost, stream[i]);
        if (cudaError != cudaSuccess) {mexPrintf("returnedArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

        mexPrintf("returnedArray[offset]: %g, [end]: %g\n",returnedArray[offset/sizeof(double)],returnedArray[(i+1)*streamSize-1]);
    }


    for (int i = 0; i < numberOfStreams; i++)
    {
        cudaStreamDestroy(stream[i]);    
    }

    cudaFree(hostArray);
    cudaFree(deviceArray);

}

With 2 streams, the result is an array of zeros, which makes me think I am getting the memory handling wrong. Can anyone explain what I am doing wrong? If anyone needs help compiling and running these from Matlab, I can provide the commands to do so.

Update:

for (int i = 0; i < numberOfStreams; i++)
{
    offset = i*streamSize;
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

    cudaMemcpyAsync(deviceArray, hostArray+offset, sizeof(double)*streamSize, cudaMemcpyHostToDevice, stream[i]);
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray);


}
cudaDeviceSynchronize();


for (int i = 0; i < numberOfStreams; i++)
{
    offset = i*streamSize;
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

    cudaMemcpyAsync(returnedArray+offset, deviceArray, sizeof(double)*streamSize, cudaMemcpyDeviceToHost, stream[i]);
    if (cudaError != cudaSuccess) {mexPrintf("returnedArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    mexPrintf("returnedArray[offset]: %g, [end]: %g\n",returnedArray[offset/sizeof(double)],returnedArray[(i+1)*streamSize-1]);

    cudaStreamDestroy(stream[i]);    
}

2 Answers


You need to remember that the APIs used with streams are completely asynchronous, so control returns to the calling host thread immediately. If you do not insert some kind of synchronization point between the GPU running the asynchronous operations and the host, there is no guarantee that the operations you queued into the streams have actually finished. In your example, that means you need something like this:

for (int i = 0; i < numberOfStreams; i++) 
{ 
    offset = i;//*streamSize; 
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset); 

    cudaMemcpyAsync(deviceArray, hostArray+offset, sizeof(double)*streamSize, 
                    cudaMemcpyHostToDevice, stream[i]); 

    concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray); 

    cudaMemcpyAsync(returnedArray+offset, deviceArray, sizeof(double)*streamSize,
                    cudaMemcpyDeviceToHost, stream[i]); 
} 

// Host thread waits here until both kernels and copies are finished
cudaDeviceSynchronize();

for (int i = 0; i < numberOfStreams; i++) 
{ 
    offset = i;//*streamSize; 
    mexPrintf("returnedArray[offset]: %g, [end]: %g\n",returnedArray[offset/sizeof(double)],returnedArray[(i+1)*streamSize-1]); 
    cudaStreamDestroy(stream[i]);     
} 

The key point is that you need to make sure both memory transfers have completed before you try to inspect the results in host memory. Neither your original code nor your update does this.
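
If you would rather not block on the whole device, waiting on each stream individually also works. This is only a sketch, reusing the same stream[], returnedArray and streamSize as above:

// Alternative to cudaDeviceSynchronize(): wait on each stream individually.
for (int i = 0; i < numberOfStreams; i++)
{
    cudaStreamSynchronize(stream[i]);   // blocks until all work queued in stream[i] has finished
}

// Only now is it safe to read returnedArray on the host.
for (int i = 0; i < numberOfStreams; i++)
{
    mexPrintf("returnedArray[%d]: %g\n", i*streamSize, returnedArray[i*streamSize]);
    cudaStreamDestroy(stream[i]);
}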

answered 2012-09-10T07:10:55.890

In addition, you appear to be reusing the deviceArray pointer across the different concurrent streams. If the current code works at all as written, it is most likely because the false dependencies @Tom mentioned cause the hardware to run the streams sequentially. You really should have a separate deviceArray for each stream:

/* device memory */
double *deviceArray[numberOfStreams];
for (int i = 0; i < numberOfStreams; i++)
{
    cudaError = cudaMalloc( (void **)&deviceArray[i],sizeof(double)*streamSize);    // size of array for each stream.
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }
}

for (int i = 0; i < numberOfStreams; i++)
{
    offset = i;//*streamSize;
    mexPrintf("offset: %d, element: %d\n",offset*sizeof(double),offset);

    cudaMemcpyAsync(deviceArray[i], hostArray+offset, sizeof(double)*streamSize, cudaMemcpyHostToDevice, stream[i]);
    if (cudaError != cudaSuccess) {mexPrintf("deviceArray memory allocation failed\n********** Error: %s **********\n",cudaGetErrorString(cudaError)); return; }

    concurrentKernel<<<1, 512, 0, stream[i]>>>(width, i, deviceArray[i]); 

    cudaMemcpyAsync(returnedArray+offset, deviceArray[i], sizeof(double)*streamSize,
                    cudaMemcpyDeviceToHost, stream[i]);     
}
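
With one buffer per stream the cleanup changes accordingly. A minimal sketch (note that memory allocated with cudaMallocHost should be released with cudaFreeHost rather than cudaFree):

for (int i = 0; i < numberOfStreams; i++)
{
    cudaFree(deviceArray[i]);      // one device buffer per stream
}
cudaFreeHost(hostArray);           // pinned host memory from cudaMallocHost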
answered 2012-09-10T22:48:56.780