
I am trying to compute a triple Riemann sum with CUDA. I want to use a multi-dimensional grid iterator for my sum iterators to avoid nested loops. I am using a compute capability 2.0 Tesla card, so I cannot use nested kernels.

I do not seem to be getting the full 0 -> N iteration for each of the x, y, z variables I need.

__global__ void test(){
    uint xIteration = blockDim.x * blockIdx.x + threadIdx.x;
    uint yIteration = blockDim.y * blockIdx.y + threadIdx.y;
    uint zIteration = blockDim.z * blockIdx.z + threadIdx.z;
    printf("x: %d * %d + %d = %d\n y: %d * %d + %d = %d\n z: %d * %d + %d = %d\n", blockDim.x, blockIdx.x, threadIdx.x, xIteration, blockDim.y, blockIdx.y, threadIdx.y, yIteration, blockDim.z, blockIdx.z, threadIdx.z, zIteration);
}

---- called by ----

int totalIterations = 128; // N value for single sum (i = 0; i < N)
dim3 threadsPerBlock(8,8,8);
dim3 blocksPerGrid((totalIterations + threadsPerBlock.x - 1) / threadsPerBlock.x, 
                   (totalIterations + threadsPerBlock.y - 1) / threadsPerBlock.y, 
                   (totalIterations + threadsPerBlock.z - 1) / threadsPerBlock.z);
test<<<blocksPerGrid, threadsPerBlock>>>();

---- output ----

x   y   z
...
7 4 0
7 4 1
7 4 2
7 4 3
7 4 4
7 4 5
7 4 6
7 4 7
7 5 0
7 5 1
7 5 2
7 5 3
7 5 4
7 5 5
7 5 6
7 5 7
7 6 0
7 6 1
7 6 2
7 6 3
7 6 4
7 6 5
7 6 6
7 6 7
7 7 0
7 7 1
7 7 2
7 7 3
7 7 4
7 7 5
7 7 6
7 7 7
...

The output is truncated; I am now getting every permutation for 0 < x,y,z < 7, but I need 0 < x,y,z < 127 when totalIterations is 128. For example, in this execution 40 < z < 49, when it should be 0 <= z <= 127. My understanding of the multi-dimensional grid may be wrong, but for the Riemann sum each iterator x, y, and z must take values from 0 to 127.
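(Aside: device-side printf output is written into a fixed-size FIFO that is flushed on synchronization, so with 128^3 threads each printing three lines most of the output is silently dropped; the indices may well be generated even though they never appear. A minimal sketch of enlarging the buffer before the launch, with an arbitrary example size:)

size_t printfFifoBytes = 64 * 1024 * 1024;                    // example size only
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, printfFifoBytes); // enlarge the device printf FIFO
test<<<blocksPerGrid, threadsPerBlock>>>();
cudaDeviceSynchronize();                                      // flush buffered printf output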

Also, if I make totalIterations > 128, e.g. 1024, the program terminates with cudaError code 6, which I understand to be the launch timer expiring. The kernel does nothing but print, so I do not see why it would time out. Running it on a secondary device seems to remove the problem for now. We are using one of the Teslas to run X, but there is a GeForce in the mail that will become the new display device, freeing up both Teslas for computation.
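(A minimal sketch of querying the error right after the launch, so the failure is reported by name rather than just as exit code 6; in the runtime API, code 6 is cudaErrorLaunchTimeout, i.e. the display watchdog killed the kernel:)

test<<<blocksPerGrid, threadsPerBlock>>>();
cudaError_t err = cudaDeviceSynchronize();   // waits for the kernel and returns its status
if (err != cudaSuccess)
    printf("kernel failed: %s\n", cudaGetErrorString(err));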

The printf(...) will be replaced by execution of the function being summed.

The idea is to replace the serial version of:

for (int i = 0...)
    for (int j = 0 ..)
       for (int k = 0...)

I am also not sure how to store the function values, since creating a potentially huge (millions x millions x millions) 3D array and then reducing it does not seem memory efficient; instead the function values should somehow be accumulated into some kind of shared variable.
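(One possibility, sketched below with placeholders that are not from the question — an integrand f, an interval start a, and a step h — is to have each thread evaluate its single term and accumulate it into one device float with atomicAdd, so the N x N x N array is never allocated. Single-precision atomicAdd is available on compute capability 2.0; double-precision atomicAdd is not, and for large N a block-level shared-memory reduction followed by one atomic per block would be both faster and more accurate.)

// Sketch only: f, a, and h stand in for the real integrand and domain.
__device__ float f(float x, float y, float z) {
    return x * y * z;                                 // placeholder integrand
}

__global__ void riemannSum(float *sum, int n, float a, float h) {
    uint i = blockDim.x * blockIdx.x + threadIdx.x;
    uint j = blockDim.y * blockIdx.y + threadIdx.y;
    uint k = blockDim.z * blockIdx.z + threadIdx.z;
    if (i < n && j < n && k < n)                      // guard the padded last blocks
        atomicAdd(sum, f(a + i*h, a + j*h, a + k*h) * h*h*h);
}

Here sum would be a single device float zeroed before the launch (same blocksPerGrid / threadsPerBlock as above) and copied back once afterwards.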

---- device info (we have 2 of these cards; the output is identical for both) ----

Device 1: "Tesla C2050"
  CUDA Driver Version / Runtime Version          5.0 / 5.0
  CUDA Capability Major/Minor version number:    2.0
  Total amount of global memory:                 2687 MBytes (2817982464 bytes)
  (14) Multiprocessors x ( 32) CUDA Cores/MP:    448 CUDA Cores
  GPU Clock rate:                                1147 MHz (1.15 GHz)
  Memory Clock rate:                             1500 Mhz
  Memory Bus Width:                              384-bit
  L2 Cache Size:                                 786432 bytes
  Max Texture Dimension Size (x,y,z)             1D=(65536), 2D=(65536,65535), 3D=(2048,2048,2048)
  Max Layered Texture Size (dim) x layers        1D=(16384) x 2048, 2D=(16384,16384) x 2048
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       49152 bytes
  Total number of registers available per block: 32768
  Warp size:                                     32
  Maximum number of threads per multiprocessor:  1536
  Maximum number of threads per block:           1024
  Maximum sizes of each dimension of a block:    1024 x 1024 x 64
  Maximum sizes of each dimension of a grid:     65535 x 65535 x 65535
  Maximum memory pitch:                          2147483647 bytes
  Texture alignment:                             512 bytes
  Concurrent copy and execution:                 Yes with 2 copy engine(s)
  Run time limit on kernels:                     No
  Integrated GPU sharing Host Memory:            No
  Support host page-locked memory mapping:       Yes
  Concurrent kernel execution:                   Yes
  Alignment requirement for Surfaces:            Yes
  Device has ECC support enabled:                Yes
  Device is using TCC driver mode:               No
  Device supports Unified Addressing (UVA):      Yes
  Device PCI Bus ID / PCI location ID:           132 / 0
  Compute Mode:
     < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >

1 Answer


I think, as has already been mentioned, using printf in device code to verify that every element of the (x,y,z) space has been touched by a thread is unwise for larger x, y, z values.

I created the following based on your code to demonstrate that every x,y,z element is touched by a thread:

#include <stdio.h>
#define DATAVAL 1
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

__global__ void test(int *data, int dim){
  // compute this thread's (x,y,z) coordinate in the overall grid
  uint xIteration = blockDim.x * blockIdx.x + threadIdx.x;
  uint yIteration = blockDim.y * blockIdx.y + threadIdx.y;
  uint zIteration = blockDim.z * blockIdx.z + threadIdx.z;

  // mark this cell in the flattened dim*dim*dim array
  data[((((zIteration*dim)+yIteration)*dim)+xIteration)]=DATAVAL;
}

int main(){
  int *testdata;
  int *result;
  int totalIterations = 128; // N value for single sum (i = 0; i < N)
  int testsize = totalIterations*totalIterations*totalIterations;
  dim3 threadsPerBlock(8,8,8);
  dim3 blocksPerGrid((totalIterations + threadsPerBlock.x - 1) / threadsPerBlock.x,
                     (totalIterations + threadsPerBlock.y - 1) / threadsPerBlock.y,
                     (totalIterations + threadsPerBlock.z - 1) / threadsPerBlock.z);
  cudaMalloc(&testdata, testsize*sizeof(int));
  cudaCheckErrors("cudaMalloc fail");
  cudaMemset(testdata, 0, testsize*sizeof(int));
  cudaCheckErrors("cudaMemset fail");
  result=(int *)malloc(testsize*sizeof(int));
  if (result == 0) {printf("malloc fail \n"); return 1;}
  memset(result, 0, testsize*sizeof(int));
  test<<<blocksPerGrid, threadsPerBlock>>>(testdata, totalIterations);
  cudaDeviceSynchronize();
  cudaCheckErrors("Kernel launch failure");
  cudaMemcpy(result, testdata, testsize*sizeof(int), cudaMemcpyDeviceToHost);
  cudaCheckErrors("cudaMemcpy failure");

  // every cell should now hold DATAVAL; any miss means no thread wrote it
  for (unsigned i=0; i<testsize; i++)
    if (result[i] != DATAVAL) {printf("fail! \n"); return 1;}

  printf("Success \n");
  return 0;

}
answered 2012-09-25T03:41:55.217