
The following code is from AMD's official site:

__kernel
void reduce(__global float* buffer,
            __local float* scratch,
            __const int length,
            __global float* result) {

  int global_index = get_global_id(0);
  float accumulator = INFINITY;
  // Loop sequentially over chunks of input vector
  while (global_index < length) {
    float element = buffer[global_index];
    accumulator = (accumulator < element) ? accumulator : element;
    global_index += get_global_size(0);
  }

  // Perform parallel reduction
  int local_index = get_local_id(0);
  scratch[local_index] = accumulator;
  barrier(CLK_LOCAL_MEM_FENCE);
  for(int offset = get_local_size(0) / 2;
      offset > 0;
      offset = offset / 2) {
    if (local_index < offset) {
      float other = scratch[local_index + offset];
      float mine = scratch[local_index];
      scratch[local_index] = (mine < other) ? mine : other;
    }
    barrier(CLK_LOCAL_MEM_FENCE);
  }
  if (local_index == 0) {
     result[get_group_id(0)] = scratch[0];
  }
}

I adjusted it so that it works as a sum reduction:

__kernel
void reduce(__global float* buffer,
            __local float* scratch,
            __const int length,
            __global float* result) {

  int global_index = get_global_id(0);
  float accumulator = 0.0;
  // Loop sequentially over chunks of input vector
  while (global_index < length) {
    float element = buffer[global_index];
    accumulator = accumulator + element;
    global_index += get_global_size(0);
  }

  // Perform parallel reduction
  int local_index = get_local_id(0);
  scratch[local_index] = accumulator;
  barrier(CLK_LOCAL_MEM_FENCE);
  for(int offset = get_local_size(0) / 2;
      offset > 0;
      offset = offset / 2) {
    if (local_index < offset) {
      float other = scratch[local_index + offset];
      float mine = scratch[local_index];
      scratch[local_index] = mine + other;
    }
    barrier(CLK_LOCAL_MEM_FENCE);
  }
  if (local_index == 0) {
     result[get_group_id(0)] = scratch[0];
  }
}

It works like a charm when I use only a single work-group (meaning I pass NULL as local_work_size to clEnqueueNDRangeKernel()), but when I try to change the work-group dimensions, things get out of my control. (I should say I am new to OpenCL.)

What I did is the following:

#define GLOBAL_DIM 600
#define WORK_DIM 60

size_t global_1D[3] = {GLOBAL_DIM,1,1};
size_t work_dim[3] = {WORK_DIM,1,1};
err = clEnqueueNDRangeKernel(commands, av_velocity_kernel, 1, NULL, global_1D, work_dim, 0, NULL, NULL); //TODO CHECK THIS LINE
if (err) {
  printf("Error: Failed to execute av_velocity_kernel!\n");
  printf("\n%s", err_code(err));
  fflush(stdout);
  return EXIT_FAILURE;
}

Am I doing something wrong?

Also, I noticed that if I set #define GLOBAL_DIM 60000 (which is what I need), I run out of local memory. If I use multiple work-groups, do I get "more" local memory, or is local memory divided evenly among the work-groups?


1 Answer


First of all, those reduction kernels only work correctly if the work-group size is a power of two, so instead of 60 you should use something like 64. Also, there is no way that changing GLOBAL_DIM by itself makes you run out of local memory: the __local scratch buffer is sized per work-group from the local size, not from the global size, so you are most probably doing something wrong when invoking the kernel.
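A minimal host-side sketch of what such an invocation could look like (this is not the poster's actual code: the context, command queue, kernel handle and input buffer_mem are assumed to already exist, names like reduce_kernel and result_mem are hypothetical, and error checking is omitted). It shows the three points above: a power-of-two local size, the __local scratch argument sized from the local size rather than from GLOBAL_DIM, and a result buffer with one float per work-group whose partial sums are added on the host.

#define LENGTH     60000          /* number of input elements                 */
#define LOCAL_DIM  64             /* power of two, as required by the kernel  */
#define NUM_GROUPS 16             /* chosen freely; global = groups * local   */

size_t local_1D[1]  = { LOCAL_DIM };
size_t global_1D[1] = { NUM_GROUPS * LOCAL_DIM };   /* multiple of LOCAL_DIM  */
int length = LENGTH;
cl_int err;

/* result needs one float per work-group, not one per element */
cl_mem result_mem = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
                                   NUM_GROUPS * sizeof(float), NULL, &err);

err  = clSetKernelArg(reduce_kernel, 0, sizeof(cl_mem), &buffer_mem);
/* __local scratch: pass only its size (LOCAL_DIM floats) with a NULL pointer */
err |= clSetKernelArg(reduce_kernel, 1, LOCAL_DIM * sizeof(float), NULL);
err |= clSetKernelArg(reduce_kernel, 2, sizeof(int), &length);
err |= clSetKernelArg(reduce_kernel, 3, sizeof(cl_mem), &result_mem);

err = clEnqueueNDRangeKernel(commands, reduce_kernel, 1, NULL,
                             global_1D, local_1D, 0, NULL, NULL);

/* each work-group wrote one partial sum; finish the reduction on the host */
float partial[NUM_GROUPS], total = 0.0f;
err = clEnqueueReadBuffer(commands, result_mem, CL_TRUE, 0,
                          NUM_GROUPS * sizeof(float), partial, 0, NULL, NULL);
for (int i = 0; i < NUM_GROUPS; ++i)
  total += partial[i];

Because the kernel strides by get_global_size(0) in its first loop, the global size does not have to cover every element of the input; it only has to be a multiple of the local size.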

Answered 2012-05-05T19:08:49.373