
I am trying to do a sum using the CUB reduction method.

The big problem is: I am not sure how to return the value of each block to the host when using a 2-dimensional grid.

#include <iostream>
#include <math.h>
#include <cstring>
#include <cub/block/block_reduce.cuh>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <iomanip>

#define nat 1024
#define BLOCK_SIZE 32
#define GRID_SIZE 32

struct frame
{
   int  natm;
   char  title[100];
   float conf[nat][3];
};

using namespace std;
using namespace cub;

__global__
void add(frame* s, float L, float rc, float* blocksum)
{
  int i = blockDim.x*blockIdx.x + threadIdx.x;
  int j = blockDim.y*blockIdx.y + threadIdx.y;

  float E = 0.0, rij, dx, dy, dz;

  // Do the calculations first so that each thread holds its own result
  dx = fabs(s->conf[j][0] - s->conf[i][0]);
  dy = fabs(s->conf[j][1] - s->conf[i][1]);
  dz = fabs(s->conf[j][2] - s->conf[i][2]);
  dx = dx - round(dx/L)*L;
  dy = dy - round(dy/L)*L;
  dz = dz - round(dz/L)*L;

  rij = sqrt(dx*dx + dy*dy + dz*dz);

  if ((rij <= rc) && (rij > 0.0))
    {E = (4*((1/pow(rij,12))-(1/pow(rij,6))));}

  //  E = 1.0;
  __syncthreads();

  // Block-wise reduction so that one thread in each block holds the sum of thread results
  typedef cub::BlockReduce<float, BLOCK_SIZE, BLOCK_REDUCE_RAKING, BLOCK_SIZE> BlockReduce;

  __shared__ typename BlockReduce::TempStorage temp_storage;

  float aggregate = BlockReduce(temp_storage).Sum(E);

  if (threadIdx.x == 0 && threadIdx.y == 0)
    blocksum[blockIdx.x*blockDim.y + blockIdx.y] = aggregate;

}

int main(void)
{
  frame  * state = (frame*)malloc(sizeof(frame));

  float *blocksum = (float*)malloc(GRID_SIZE*GRID_SIZE*sizeof(float));

  state->natm = nat; // initialize the number of atoms

  char name[] = "estado1";
  strcpy(state->title,name);

  for (int i = 0; i < nat; i++) {
    state->conf[i][0] = i;
    state->conf[i][1] = i;
    state->conf[i][2] = i;
  }

  frame * d_state;
  float *d_blocksum;

  cudaMalloc((void**)&d_state, sizeof(frame));

  cudaMalloc((void**)&d_blocksum, ((GRID_SIZE*GRID_SIZE)*sizeof(float)));

  cudaMemcpy(d_state, state, sizeof(frame),cudaMemcpyHostToDevice);


  dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
  dim3 gridBlock(GRID_SIZE,GRID_SIZE);

  add<<<gridBlock,dimBlock>>>(d_state, 3000, 15, d_blocksum);

  cudaError_t status =  cudaMemcpy(blocksum, d_blocksum, ((GRID_SIZE*GRID_SIZE)*sizeof(float)),cudaMemcpyDeviceToHost);

  float Etotal = 0.0;
  for (int k = 0; k < GRID_SIZE*GRID_SIZE; k++){
       Etotal += blocksum[k];
  }
 cout << endl << "energy: " << Etotal << endl;

  if (cudaSuccess != status)
  {
    cout << cudaGetErrorString(status) << endl;
  }

 // Free memory
  cudaFree(d_state);
  cudaFree(d_blocksum);

  return cudaThreadExit();
}

What happens is this: if the value of GRID_SIZE is the same as BLOCK_SIZE, as above, the calculation is correct. But if I change the value of GRID_SIZE, the result comes out wrong. That makes me think the error is in this line:

blocksum[blockIdx.x*blockDim.y + blockIdx.y] = aggregate;

The idea here is to return a one-dimensional array that contains the sum of each block.

I do not intend to change the BLOCK_SIZE value, but the GRID_SIZE value depends on the system I am looking at, and I intend to use values greater than 32 (always multiples of 32).

I looked for examples that use a 2D grid with CUB, but could not find any.

I am really new to CUDA programming, so maybe I am making a mistake.

EDIT: I have put up the complete code. For comparison, when I calculate these exact values with a serial program, it gives me an energy of -297,121.


1 Answer


Probably the main issue is that your output indexing is incorrect. Here is a simplified version of your code that demonstrates correct results for an arbitrary GRID_SIZE:

$ cat t1360.cu
#include <stdio.h>
#include <cub/cub.cuh>
#define BLOCK_SIZE 32
#define GRID_SIZE 25
__global__
void add(float* blocksum)
{
   float E = 1.0;
  // Block wise reduction so that one thread in each block holds sum of thread results
    typedef cub::BlockReduce<float, BLOCK_SIZE, cub::BLOCK_REDUCE_RAKING, BLOCK_SIZE> BlockReduce;

    __shared__ typename BlockReduce::TempStorage temp_storage;
    float aggregate = BlockReduce(temp_storage).Sum(E);
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0)
        blocksum[blockIdx.y*gridDim.x + blockIdx.x] = aggregate;
}

int main(){

  float *d_result, *h_result;
  h_result = (float *)malloc(GRID_SIZE*GRID_SIZE*sizeof(float));
  cudaMalloc(&d_result, GRID_SIZE*GRID_SIZE*sizeof(float));
  dim3 grid  = dim3(GRID_SIZE,GRID_SIZE);
  dim3 block = dim3(BLOCK_SIZE, BLOCK_SIZE);
  add<<<grid, block>>>(d_result);
  cudaMemcpy(h_result, d_result, GRID_SIZE*GRID_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {printf("cuda error: %s\n", cudaGetErrorString(err)); return -1;}
  float result = 0;
  for (int i = 0; i < GRID_SIZE*GRID_SIZE; i++) result += h_result[i];
  if (result != (float)(GRID_SIZE*GRID_SIZE*BLOCK_SIZE*BLOCK_SIZE)) printf("mismatch, should be: %f, was: %f\n", (float)(GRID_SIZE*GRID_SIZE*BLOCK_SIZE*BLOCK_SIZE), result);
  else printf("Success\n");
  return 0;
}

$ nvcc -o t1360 t1360.cu
$ ./t1360
Success
$

The important change I made to your kernel code is in the output indexing:

blocksum[blockIdx.y*gridDim.x + blockIdx.x] = aggregate;

We want a simulated 2D index into an array whose width and height are both GRID_SIZE, consisting of one float quantity per point. Therefore the width of this array is given by gridDim.x (not blockDim). The gridDim variable gives the dimensions of the grid in terms of blocks, and this lines up exactly with how our results array is set up.
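
As a sanity check, here is a minimal host-side sketch (my own illustration, not part of the original answer) verifying that the row-major mapping blockIdx.y*gridDim.x + blockIdx.x assigns each block a unique slot in a GRID_SIZE x GRID_SIZE output array:

#include <cstdio>
#include <vector>

#define GRID_SIZE 25   // stands in for gridDim.x == gridDim.y

int main() {
  // Simulate the per-block write: index = blockIdx.y * gridDim.x + blockIdx.x
  std::vector<int> hits(GRID_SIZE*GRID_SIZE, 0);
  for (int by = 0; by < GRID_SIZE; by++)     // plays the role of blockIdx.y
    for (int bx = 0; bx < GRID_SIZE; bx++)   // plays the role of blockIdx.x
      hits[by*GRID_SIZE + bx]++;             // GRID_SIZE plays the role of gridDim.x
  // Every slot should be written exactly once, with no collisions
  for (int k = 0; k < GRID_SIZE*GRID_SIZE; k++)
    if (hits[k] != 1) { printf("collision at %d\n", k); return -1; }
  printf("all %d slots written exactly once\n", GRID_SIZE*GRID_SIZE);
  return 0;
}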

The code you posted would fail whenever GRID_SIZE and BLOCK_SIZE differ (for example, if GRID_SIZE were smaller than BLOCK_SIZE, cuda-memcheck would show illegal accesses; if GRID_SIZE were larger than BLOCK_SIZE, this indexing error would cause blocks to overwrite each other's values in the output array) because of this mixup between blockDim and gridDim.
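
If you want to observe the illegal accesses yourself, you can compile your original code and run it under cuda-memcheck after setting GRID_SIZE below BLOCK_SIZE. The file and executable names below are placeholders, and the exact report text varies by CUDA toolkit version, so I won't reproduce it here:

$ nvcc -o your_app your_code.cu
$ cuda-memcheck ./your_app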

Also note that float operations typically have only about 5 decimal digits of precision. Small differences in the 5th or 6th significant decimal digit may be due to differences in the order of operations when doing floating-point arithmetic. You can prove this to yourself by switching to double arithmetic.
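
A minimal sketch of that experiment (my own illustration, using an intentionally extreme series rather than your energy sum): adding the same values forward and backward shows float's sensitivity to operation order, while double agrees to many more digits:

#include <cstdio>

int main() {
  // Sum the harmonic series 1/k for k = 1..N in both directions.
  // The two float results differ visibly because of rounding-order effects;
  // the two double results agree to many more digits.
  const int N = 10000000;
  float  f_fwd = 0.0f, f_bwd = 0.0f;
  double d_fwd = 0.0,  d_bwd = 0.0;
  for (int k = 1; k <= N; k++) f_fwd += 1.0f/k;
  for (int k = N; k >= 1; k--) f_bwd += 1.0f/k;
  for (int k = 1; k <= N; k++) d_fwd += 1.0/k;
  for (int k = N; k >= 1; k--) d_bwd += 1.0/k;
  printf("float : forward %.7f   backward %.7f\n",  f_fwd, f_bwd);
  printf("double: forward %.15f  backward %.15f\n", d_fwd, d_bwd);
  return 0;
}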

Answered 2018-06-02T00:32:03.707