
I have been trying to write a kernel that computes the sum of the inverse of the distances between N given points. The serial code in C looks like this:

float average = 0.0f;
for(int i = 0; i < Np; i++){
    for(int j = i + 1; j < Np; j++){
        average += 1.0e0f/sqrtf((rx[i]-rx[j])*(rx[i]-rx[j]) + (ry[i]-ry[j])*(ry[i]-ry[j]));
    }
}
average = average/(float)Np;

where rx and ry are the x and y coordinates, respectively.

I generate the points with a kernel that uses a random number generator. For the kernels I use 128 (256) threads per block for 4k (8k) points. In it, each thread executes the inner loop of the serial code above and then passes its result to a reduce-sum function, as follows.

Generating the points:

__global__ void InitRNG ( curandState * state, const int seed ){
    int tIdx = blockIdx.x*blockDim.x + threadIdx.x;
    curand_init (seed, tIdx, 0, &state[tIdx]);
}

__global__
void SortPoints(float* X, float* Y,const int N, curandState *state){

    float rdmn1, rdmn2;

    unsigned int tIdx = blockIdx.x*blockDim.x + threadIdx.x;
    float range;
    if(tIdx < N){
        rdmn1 = curand_uniform(&state[tIdx]);
        rdmn2 = curand_uniform(&state[tIdx]);
        range = sqrtf(0.25e0f*N*rdmn1);
            X[tIdx] = range*cosf(2.0e0f*pi*rdmn2);
            Y[tIdx] = range*sinf(2.0e0f*pi*rdmn2);
    }
}

Reduction:

__device__
float ReduceSum2(float In){

    __shared__ float data[BlockSize];

    unsigned int tIdx = threadIdx.x;

    data[tIdx] = In;
    __syncthreads();

    for(unsigned int i = blockDim.x/2; i > 0; i >>= 1){

        if(tIdx < i){

            data[tIdx] += data[tIdx + i];   
        }

        __syncthreads();
    }

    return data[0];

}

Kernel:

__global__ 
void AvgDistance(float *X, float *Y, float *Avg, const int N){

    int tIdx = blockIdx.x*blockDim.x + threadIdx.x;
    int bIdx = blockIdx.x;

    float x , y;
    float d = 0.0f;
    if(tIdx < N){

        for(int i = tIdx + 1; i < N ; i++){

            x = X[tIdx] - X[i];
            y = Y[tIdx] - Y[i];

            d += 1.0e0f/(sqrtf(x*x + y*y));     
        }
        __syncthreads();
        Avg[bIdx] = ReduceSum2(d);

    }
}

The kernels are configured and launched as follows:

dim3 threads(BlockSize,BlockSize);
dim3 blocks(ceil(Np/threads.x),ceil(Np/threads.y));

InitRNG<<<blocks.x,threads.x>>>(d_state,seed);
SortPoints<<<blocks.x,threads.x>>>(d_rx,d_ry,Np,d_state);
AvgDistance<<<blocks.x,threads.x,threads.x*sizeof(float)>>>(d_rx,d_ry,d_Avg,Np);
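
(As an aside, and not something from the original post: because Np and threads.x are both integers, Np/threads.x truncates before ceil is applied, so the ceil has no effect. It happens not to matter here because Np is an exact multiple of BlockSize, but a common way to round the block count up with pure integer arithmetic is sketched below.)

// Hypothetical sketch: integer ceiling division for the 1D grid size,
// avoiding the int-to-float round trip entirely.
int gridX = (Np + BlockSize - 1) / BlockSize;   // rounds up for any Np

InitRNG<<<gridX, BlockSize>>>(d_state, seed);
SortPoints<<<gridX, BlockSize>>>(d_rx, d_ry, Np, d_state);
AvgDistance<<<gridX, BlockSize, BlockSize*sizeof(float)>>>(d_rx, d_ry, d_Avg, Np);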

Finally, I copy the data back to the host and perform the remaining sum:

Avg = new float[blocks.x];

CHECK(cudaMemcpy(Avg,d_Avg,blocks.x*sizeof(float),cudaMemcpyDeviceToHost),ERROR_CPY_DEVTOH);
float average = 0;

for(int i = 0; i < blocks.x; i++){
    average += Avg[i];
}
average = average/(float)Np;

With 4k points everything is OK! The results are:

Average distance between points (via Kernel) = 108.615
Average distance between points (via CPU) = 110.191

In this case the sums may be performed in a different order, causing the two results to diverge from each other, I don't know...
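
(A minimal standalone illustration, not from the original post, of why summation order matters in single precision: float addition is not associative, so a tree-shaped block reduction and a straight serial loop can legitimately round differently.)

#include <stdio.h>

int main(void){
    // The same three values summed in two different orders:
    float a = 1.0e8f, b = -1.0e8f, c = 1.0f;
    printf("(a + b) + c = %f\n", (a + b) + c);  // 1.000000: a and b cancel exactly first
    printf("a + (b + c) = %f\n", a + (b + c));  // 0.000000: c is absorbed into b and lost
    return 0;
}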

But when it comes to 8k points, the results are quite different:

Average distance between points (via Kernel) = 153.63
Average distance between points (via CPU) = 131.471

To me, the kernel and the serial code seem to be written in the same way. What is left is for me to distrust the precision with which CUDA computes floating-point numbers. Does that make sense? Or could the accesses to global memory be causing some conflicts when several threads load the same data from X and Y at the same time? Or is the way I wrote the kernel somehow "wrong" (I mean, am I doing something that causes the two results to diverge from each other)?


1 Answer


Actually, as far as I can tell, the problem appears to be on the CPU side. I created a sample code based on yours.

I was able to reproduce your results.

First, I switched all instances of sinf, cosf, and sqrtf to their corresponding double versions. This made no difference in the results.

Next I included a typedef so I could easily switch the precision back and forth between float and double, replacing every relevant instance of float in the code with my typedef mytype.

When I run the code with a typedef of float and a data size of 4096, I get these results:

GPU average = 108.294922
CPU average = 109.925285

When I run the code with a typedef of double and a data size of 4096, I get these results:

GPU average = 108.294903
CPU average = 108.294903

When I run the code with a typedef of float and a data size of 8192, I get these results:

GPU average = 153.447327
CPU average = 131.473526

When I run the code with a typedef of double and a data size of 8192, I get these results:

GPU average = 153.447380
CPU average = 153.447380

There are at least 2 observations:

  1. The GPU results do not vary between float and double, except in the 5th decimal place and beyond.
  2. The CPU results vary by 1-20% or so between float and double, but when double is selected, they line up exactly (to the 6th decimal place, anyway) with the GPU results.

Based on this, I believe that it is the CPU that is providing the variable, questionable behavior.

Here is my code, for reference:

#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#define DSIZE 8192
#define BlockSize 32
#define pi 3.14159f


#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

typedef double mytype;

__global__ void InitRNG ( curandState * state, const int seed ){
    int tIdx = blockIdx.x*blockDim.x + threadIdx.x;
    curand_init (seed, tIdx, 0, &state[tIdx]);
}

__global__
void SortPoints(mytype* X, mytype* Y,const int N, curandState *state){

    mytype rdmn1, rdmn2;

    unsigned int tIdx = blockIdx.x*blockDim.x + threadIdx.x;
    mytype range;
    if(tIdx < N){
        rdmn1 = curand_uniform(&state[tIdx]);
        rdmn2 = curand_uniform(&state[tIdx]);
        range = sqrt(0.25e0f*N*rdmn1);
            X[tIdx] = range*cos(2.0e0f*pi*rdmn2);
            Y[tIdx] = range*sin(2.0e0f*pi*rdmn2);
    }
}

__device__
mytype ReduceSum2(mytype In){

    __shared__ mytype data[BlockSize];

    unsigned int tIdx = threadIdx.x;

    data[tIdx] = In;
    __syncthreads();

    for(unsigned int i = blockDim.x/2; i > 0; i >>= 1){

        if(tIdx < i){

            data[tIdx] += data[tIdx + i];
        }

        __syncthreads();
    }

    return data[0];

}

__global__
void AvgDistance(mytype *X, mytype *Y, mytype *Avg, const int N){

    int tIdx = blockIdx.x*blockDim.x + threadIdx.x;
    int bIdx = blockIdx.x;

    mytype x , y;
    mytype d = 0.0f;
    if(tIdx < N){

        for(int i = tIdx + 1; i < N ; i++){

            x = X[tIdx] - X[i];
            y = Y[tIdx] - Y[i];

            d += 1.0e0f/(sqrt(x*x + y*y));
        }
        __syncthreads();
        Avg[bIdx] = ReduceSum2(d);

    }
}

mytype cpu_avg(const mytype *rx, const mytype *ry, const int size){
  mytype average = 0.0f;
  for(int i = 0; i < size; i++){
    for(int j = i + 1; j < size; j++){
        average += 1.0e0f/sqrt((rx[i]-rx[j])*(rx[i]-rx[j]) + (ry[i]-ry[j])*(ry[i]-ry[j]));
    }
  }
  average = average/(mytype)size;
  return average;
}

int main() {

  int Np = DSIZE;
  mytype *rx, *ry, *d_rx, *d_ry, *d_Avg, *Avg;
  curandState *d_state;
  int seed = 1;

  dim3 threads(BlockSize,BlockSize);
  dim3 blocks((int)ceilf(Np/(float)threads.x),(int)ceilf(Np/(float)threads.y));
  printf("number of blocks = %d\n", blocks.x);
  printf("number of threads= %d\n", threads.x);
  rx = (mytype *)malloc(DSIZE*sizeof(mytype));
  if (rx == 0) {printf("malloc fail\n"); return 1;}
  ry = (mytype *)malloc(DSIZE*sizeof(mytype));
  if (ry == 0) {printf("malloc fail\n"); return 1;}

  cudaMalloc((void**)&d_rx, DSIZE * sizeof(mytype));
  cudaMalloc((void**)&d_ry, DSIZE * sizeof(mytype));
  cudaMalloc((void**)&d_Avg, blocks.x * sizeof(mytype));
  cudaMalloc((void**)&d_state, DSIZE * sizeof(curandState));
  cudaCheckErrors("cudamalloc");



  InitRNG<<<blocks.x,threads.x>>>(d_state,seed);
  SortPoints<<<blocks.x,threads.x>>>(d_rx,d_ry,Np,d_state);
  AvgDistance<<<blocks.x,threads.x,threads.x*sizeof(mytype)>>>(d_rx,d_ry,d_Avg,Np);
  cudaCheckErrors("kernels");


  Avg = new mytype[blocks.x];

  cudaMemcpy(Avg,d_Avg,blocks.x*sizeof(mytype),cudaMemcpyDeviceToHost);
  cudaMemcpy(rx, d_rx, DSIZE*sizeof(mytype),cudaMemcpyDeviceToHost);
  cudaMemcpy(ry, d_ry, DSIZE*sizeof(mytype),cudaMemcpyDeviceToHost);
  cudaCheckErrors("cudamemcpy");
  mytype average = 0;

  for(int i = 0; i < blocks.x; i++){
    average += Avg[i];
  }
  average = average/(mytype)Np;

  printf("GPU average = %f\n", average);
  average = cpu_avg(rx, ry, DSIZE);
  printf("CPU average = %f\n", average);

  return 0;
}

I was running on RHEL 5.5, CUDA 5.0, Intel Xeon X5560.

Compiled with:

nvcc -O3 -arch=sm_20 -lcurand -lm -o t93 t93.cu

EDIT: After observing the variability on the CPU side, I found that I could eliminate most of it by modifying the CPU averaging code as follows:

mytype cpu_avg(const mytype *rx, const mytype *ry, const int size){
  mytype average = 0.0f;
  mytype temp = 0.0f;
  for(int i = 0; i < size; i++){
    for(int j = i + 1; j < size; j++){
        temp += 1.0e0f/sqrt((rx[i]-rx[j])*(rx[i]-rx[j]) + (ry[i]-ry[j])*(ry[i]-ry[j]));
    }
    average += temp/(mytype)size;
    temp = 0.0f;
  }
  return average;
}

So I would say there is a problem with intermediate results on the CPU side. It's interesting that it does not show up in the GPU results. I suspect the reason for this is that the final summation of the GPU averages is done on the CPU (therefore each individual GPU block result is scaled down proportionally by the size, e.g. 8192), and these may have sufficient intermediate precision to survive until the final division. If you inlined the CPU average calculation, you might observe something different again.
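
(A minimal standalone sketch, not from the original answer, of the effect described above: once a single float accumulator grows large, further small contributions fall below half an ulp and stop registering, while a double accumulator, or partial sums that are kept small, retains them.)

#include <stdio.h>

int main(void){
    // Add the same small value ~33 million times (roughly the number of pair
    // terms for 8k points) into a float accumulator and a double accumulator.
    float  fsum = 0.0f;
    double dsum = 0.0;
    const int n = 8192*8192/2;
    for(int i = 0; i < n; i++){
        fsum += 0.01f;
        dsum += 0.01;
    }
    printf("exact  sum = %f\n", n*0.01);
    printf("float  sum = %f\n", fsum);   // stagnates well below the exact value
    printf("double sum = %f\n", dsum);   // stays close to the exact value
    return 0;
}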

Answered 2013-03-13