
Fellow CUDA programmers,

I am trying to implement a CPU-GPU callback mechanism using polling. I have two length-1 arrays, a and cpuflag, with device-side counterparts dev_a and gpuflag (essentially two variables).

First, the CPU clears a and waits for an update to gpuflag. The GPU sees that a has been cleared and updates gpuflag. The CPU keeps asynchronously copying gpuflag into cpuflag and waits for the flag to change. Once the CPU sees the update, it resets a and sends it asynchronously back to the GPU. The GPU again sees that a has been cleared, updates gpuflag, and the ping-pong continues. I want this process to repeat 100 times.

The complete code is below; you can compile it with nvcc -o output filename.cu. I cannot understand why the code does not exhibit the ping-pong behavior. Any help is much appreciated. Thanks in advance.

#include <stdio.h>

#define LEN 1
#define MAX 100

__global__ void myKernel(int len, int *dev_a, int *gpuflag) {
    int tid = threadIdx.x;
    gpuflag[tid] = 0;

    while (true) {
        // Check if the CPU has completed its work
        if (dev_a[tid] == 0) {
            // Do GPU work and increment the flag
            dev_a[tid] = 1;
            gpuflag[tid]++;

            // Wait until the CPU detects the flag increment and resets
            while (true) {
                if (dev_a[tid] == 0) {
                    break;
                }
            }
        }
        // Max 100 ping-pongs
        if (gpuflag[tid] == MAX) {
            break;
        }
    }
}

int main(void) {
    int index, *cpuflag, *gpuflag, value;

    int *a;
    int *dev_a;

    cudaStream_t stream0, stream1;

    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);

    cudaMalloc((void**)&gpuflag, LEN*sizeof(int));
    cudaMemset(gpuflag, 0, LEN*sizeof(int));
    cudaHostAlloc((void**)&cpuflag, LEN*sizeof(int), cudaHostAllocDefault);

    cudaMalloc((void**)&dev_a, LEN*sizeof(int));
    cudaMemset(dev_a, 0, LEN*sizeof(int));
    cudaHostAlloc((void**)&a, LEN*sizeof(int), cudaHostAllocDefault);

    // Reset everything
    for (int i = 0; i < LEN; i++)
        a[i] = 0;

    // Auxiliary variables
    index = 0;
    value = 1;

    // Call the kernel
    myKernel<<<1,1,0,stream0>>>(LEN, dev_a, gpuflag);

    while (true) {
        // Asynchronously copy the GPU flag
        cudaMemcpyAsync(cpuflag, gpuflag, LEN*sizeof(int), cudaMemcpyDeviceToHost, stream1);
        // Check whether the increment has happened
        if (cpuflag[index] == value) {
            // If yes, reset
            for (int i = 0; i < LEN; i++)
                a[i] = 0;
            // Transfer asynchronously
            cudaMemcpyAsync(dev_a, a, LEN*sizeof(int), cudaMemcpyHostToDevice, stream1);
            // Increment the pattern
            value++;
            printf("GPU updated once. Value is a[%d] = %d, cpuflag = %d\n", index, a[index], cpuflag[index]);
        } else {
            printf("------------GPU didn't update. Value is a[%d] = %d, cpuflag = %d\n", index, a[index], cpuflag[index]);
        }

        // Max 100 ping-pongs
        if (value == MAX) {
            break;
        }
    }

    cudaFreeHost(a);
    cudaFreeHost(cpuflag);

    cudaFree(dev_a);
    cudaFree(gpuflag);

    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);

    return 0;
}

1 Answer


The main thing that is probably missing is proper use of volatile.
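
To see why, consider a device-side spin loop on a flag. This is a minimal sketch of my own, not part of the answer's code: without volatile the compiler is free to cache the flag in a register and hoist the load out of the loop, so the loop may never observe a write made by the other side.

__global__ void spin_plain(int *flag) {
    // The compiler may load *flag once and reuse the register value,
    // turning this into an infinite loop that never sees the update.
    while (*flag == 0) { }
}

__global__ void spin_volatile(volatile int *flag) {
    // volatile forces a fresh load from memory on every iteration,
    // so a write from the host (or another thread) is eventually seen.
    while (*flag == 0) { }
}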

Here is a simplified, complete example:

$ cat t763.cu
#include <stdio.h>

#define LEN 1
#define MAX 100
#define DLEN 1000
#define nTPB 256

#ifdef CDP_WORKER
__global__ void cdp_worker(int len, float *data) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < len) data[tid]++;  // simple increment
}
#endif

// only call this kernel with 1 thread
__global__ void myKernel(int len, int dlen, volatile int *dev_a, int *gpuflag, float *data) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;

    while (gpuflag[tid] < MAX) {
        // Check if the CPU has completed its work
        if (dev_a[tid] == 0) {
            // Do GPU work and increment the flag
#ifdef CDP_WORKER
            cdp_worker<<<(dlen+nTPB-1)/nTPB, nTPB>>>(dlen, data);
            cudaDeviceSynchronize();
#endif
            dev_a[tid] = 1;
            gpuflag[tid]++;
        }
    }
}

void issue_work(int value, float *h_data, float *d_data, int len, cudaStream_t mystream) {
#ifdef CDP_WORKER
    cudaMemcpyAsync(h_data, d_data, len*sizeof(float), cudaMemcpyDeviceToHost, mystream);
    cudaStreamSynchronize(mystream);
    for (int i = 0; i < len; i++)
        if (h_data[i] != value+1) {
            printf("fault - was %f, should be %f\n", h_data[i], (float)(value+1));
            break;
        }
    cudaMemcpyAsync(d_data, h_data, len*sizeof(float), cudaMemcpyHostToDevice, mystream); // technically not really necessary
    cudaStreamSynchronize(mystream);
#endif
    return;
}
int main(void) {
    int *gpuflag, value;
    float *h_data, *d_data;
    cudaHostAlloc(&h_data, DLEN*sizeof(float), cudaHostAllocDefault);
    cudaMalloc(&d_data, DLEN*sizeof(float));
    volatile int *z_a;

    cudaStream_t stream0, stream1;

    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);

    cudaMalloc((void**)&gpuflag, LEN*sizeof(int));
    cudaMemset(gpuflag, 0, LEN*sizeof(int));
    cudaMemset(d_data, 0, DLEN*sizeof(float));
    cudaHostAlloc((void**)&z_a, LEN*sizeof(int), cudaHostAllocMapped);
    for (int i = 0; i < LEN; i++) z_a[i] = 0;
    value = 0;
    // call kernel
    myKernel<<<1,1,0,stream0>>>(LEN, DLEN, z_a, gpuflag, d_data);

    while (value < MAX) {
        if (z_a[0] == 1) {
            issue_work(value, h_data, d_data, DLEN, stream1);
            z_a[0] = 0;
            printf("%d", value%10);
            value++;
        }
    }
    printf("\n");
    return 0;
}
$ nvcc -o t763 t763.cu
$ cuda-memcheck ./t763
========= CUDA-MEMCHECK
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
========= ERROR SUMMARY: 0 errors
$ nvcc -DCDP_WORKER -arch=sm_35 -rdc=true t763.cu -o t763 -lcudadevrt
$ cuda-memcheck ./t763
========= CUDA-MEMCHECK
0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
========= ERROR SUMMARY: 0 errors
$
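
One detail worth noting: unlike the question, the example above does not poll with explicit cudaMemcpyAsync calls. The host flag z_a is allocated with cudaHostAllocMapped, so the kernel reads and writes the pinned host allocation directly (zero-copy). Below is a minimal standalone sketch of that pattern, with illustrative names of my own; on pre-UVA setups you would also need cudaSetDeviceFlags(cudaDeviceMapHost) before any allocation.

#include <stdio.h>

__global__ void wait_for_host(volatile int *flag) {
    while (*flag == 0) { }           // spin until the host writes the flag
}

int main(void) {
    volatile int *h_flag;            // host view of the mapped allocation
    int *d_flag;                     // device view of the same memory
    cudaHostAlloc((void **)&h_flag, sizeof(int), cudaHostAllocMapped);
    *h_flag = 0;
    cudaHostGetDevicePointer((void **)&d_flag, (void *)h_flag, 0);

    wait_for_host<<<1, 1>>>(d_flag); // launch is asynchronous
    *h_flag = 1;                     // this host write unblocks the kernel
    cudaDeviceSynchronize();         // returns once the kernel exits
    printf("kernel observed the host write\n");
    return 0;
}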

Extending this to work across multiple threads in the same warp is not trivial.

However, I have extended the basic example to demonstrate that, on cc3.5+ devices, the parent kernel can act as a supervisory kernel, issuing the actual work through child kernel launches. This is done by compiling with the CDP_WORKER define plus the additional switches required for CUDA dynamic parallelism, and running on a cc3.5+ device.

Answered 2015-05-10T02:12:54.367