I'm working on a CUDA application that has some routines for allocating and deallocating arrays in shared memory.
In this application (which, I'm sorry, I cannot provide), I have a class that wraps a chunk of memory as an array. This class has a count method that counts the number of elements matching a given value.
So, imagine something like this (which is the actual relevant part of the whole class):
template <class Type>
struct Array {
    // ...
    Type &operator[](int i) { return data_[i]; }
    Type operator[](int i) const { return data_[i]; }

    size_t count(const Type &val) const {
        size_t c = 0;
        for (size_t i = 0; i < len_; ++i)
            if (data_[i] == val)
                ++c;
        return c;
    }

    void print(const char *fmt, const char *sep, const char *end) const {
        for (size_t i = 0; i < len_ - 1; ++i) {
            printf(fmt, data_[i]);
            printf(sep);
        }
        printf(fmt, data_[len_ - 1]);
        printf(end);
    }

private:
    Type *data_;
    size_t len_;
};
Let's assume the memory I'm accessing is correctly allocated (shared memory allocated at runtime, with the size passed to the kernel), that it is big enough to hold the data, and that data_ points to a region inside the shared memory that is aligned with respect to Type. I've checked this several times, so these assumptions should hold (but feel free to propose further checks).
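To make the setup concrete, here is a minimal sketch of the kind of allocation scheme and sanity check I mean; it is illustrative only, not the actual application code, and the names shmem and placeArray are made up:

// Sketch only: dynamic shared memory declared as an unsized extern array,
// sized at launch time, with a basic alignment/size check for the sub-array.
#include <stdio.h>

extern __shared__ char shmem[];

template <class Type>
__device__ Type *placeArray(size_t byteOffset, size_t bytesPerBlock, size_t len) {
    // Sanity checks: the offset is aligned for Type and the array fits in the buffer.
    if (byteOffset % sizeof(Type) != 0)
        printf("misaligned offset %lu\n", (unsigned long)byteOffset);
    if (byteOffset + len * sizeof(Type) > bytesPerBlock)
        printf("array does not fit in the shared buffer\n");
    return (Type *)(shmem + byteOffset);
}

__global__ void kernel(size_t bytesPerBlock) {
    double *d = placeArray<double>(200, bytesPerBlock, 2); // 200 % sizeof(double) == 0
    d[0] = 1.0;
    printf("d = %p, d[0] = %f\n", d, d[0]);
}

int main() {
    size_t bytesPerBlock = 686; // runtime-sized shared memory, as in the real code
    kernel<<<1, 1, bytesPerBlock>>>(bytesPerBlock);
    cudaDeviceSynchronize();
    return 0;
}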
Now, while testing the code, I found something really strange:
- when values are explicitly assigned with operator[] and read back with operator[] const, no problem arises;
- when the data is read with print, no problem arises;
- when count() is called, the program crashes: cuda-memcheck reports Address ADDR is out of bounds, caused by an Invalid __global__ read of size x (with x = sizeof(Type)). ADDR lies inside the shared memory buffer, so it should be valid;
- if, inside count, I replace data_[i] with (*this)[i], the program runs fine and no crash occurs (see the snippet below).
Now, I have absolutely no idea why this happens, nor what to check to see what's going on behind the scenes... Why does reading directly crash? Why doesn't it crash through operator[]? And why doesn't reading (directly?) inside print crash?
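For clarity, this is the variant of count that does not crash; it belongs to the same Array class shown above and differs only in going through operator[] const instead of reading data_ directly:

    // Same count() as above, with data_[i] replaced by (*this)[i].
    // This version runs fine, while the direct data_[i] read crashes.
    size_t count(const Type &val) const {
        size_t c = 0;
        for (size_t i = 0; i < len_; ++i)
            if ((*this)[i] == val) // reads via operator[] const
                ++c;
        return c;
    }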
I know this question is hard and I'm sorry to provide so little information about the code... but feel free to ask for details and I'll answer as much as I can. Any idea or suggestion is welcome, since I've been trying to fix this for days and this is as far as I got.
I'm using two different GPUs to test this code, one with compute capability 2.1 and one with 3.5 (the latter gives detailed information about this crash, while the first does not), with CUDA 5.0.
EDIT: I've found a minimal example in which this error occurs. Curiously, the error shows up when compiling with sm_20 and sm_35, but not with sm_30. The GPU I'm using has capability 3.5.
/* Compile and run with:
nvcc -g -G bug.cu -o bug -arch=sm_20 # bug!
nvcc -g -G bug.cu -o bug -arch=sm_30 # no bug :|
nvcc -g -G bug.cu -o bug -arch=sm_35 # bug!
cuda-memcheck bug
Here's the output (skipping the initial rows) I get
Ctor for 0x3fffc10 w/o alloc, data 0x10000c8
Calling NON CONST []
Calling NON CONST []
Fill with [] ok
Fill with raw ok
Kernel launch failed with error:
unspecified launch failure
========= Invalid __global__ write of size 8
========= at 0x00000188 in /home/bio/are/AlgoCUDA/bug.cu:26:array<double>::fill(double const &)
========= by thread (0,0,0) in block (0,0,0)
========= Address 0x010000c8 is out of bounds
========= Device Frame:/home/bio/are/AlgoCUDA/bug.cu:49:kernel_bug(unsigned long) (kernel_bug(unsigned long) : 0x8c0)
========= Saved host backtrace up to driver entry point at kernel launch time
========= Host Frame:/usr/lib/libcuda.so (cuLaunchKernel + 0x3dc) [0xc9edc]
========= Host Frame:/opt/cuda-5.0/lib64/libcudart.so.5.0 [0x13324]
========= Host Frame:/opt/cuda-5.0/lib64/libcudart.so.5.0 (cudaLaunch + 0x182) [0x3ac62]
========= Host Frame:bug [0xbb8]
========= Host Frame:bug [0xaa7]
========= Host Frame:bug [0xac4]
========= Host Frame:bug [0xa07]
========= Host Frame:/lib/libc.so.6 (__libc_start_main + 0xfd) [0x1ec4d]
========= Host Frame:bug [0x8c9]
=========
========= Program hit error 4 on CUDA API call to cudaDeviceSynchronize
========= Saved host backtrace up to driver entry point at error
========= Host Frame:/usr/lib/libcuda.so [0x26a180]
========= Host Frame:/opt/cuda-5.0/lib64/libcudart.so.5.0 (cudaDeviceSynchronize + 0x1dd) [0x441fd]
========= Host Frame:bug [0xa0c]
========= Host Frame:/lib/libc.so.6 (__libc_start_main + 0xfd) [0x1ec4d]
========= Host Frame:bug [0x8c9]
=========
========= ERROR SUMMARY: 2 errors
(cuda-gdb) set cuda memcheck on
(cuda-gdb) run
Starting program: /home/bio/are/AlgoCUDA/bug
[Thread debugging using libthread_db enabled]
[New Thread 0x7ffff5c25700 (LWP 23793)]
[Context Create of context 0x625870 on Device 0]
[Launch of CUDA Kernel 0 (kernel_bug<<<(1,1,1),(1,1,1)>>>) on Device 0]
Memcheck detected an illegal access to address (@global)0x10000c8
Program received signal CUDA_EXCEPTION_1, Lane Illegal Address.
[Switching focus to CUDA kernel 0, grid 1, block (0,0,0), thread (0,0,0), device 0, sm 12, warp 0, lane 0]
0x0000000000881928 in array<double>::fill (this=0x3fffc10, v=0x3fffc08) at bug.cu:26
26 data[i] = v;
*/
#include <stdio.h>
extern __shared__ char totalSharedMemory[];
template <class Type>
struct array {
    // Create an array using a specific buffer
    __device__ __host__ array(size_t len, Type *buffer):
        len(len),
        data(buffer) {
        printf("Ctor for %p w/o alloc, data %p\n", this, data);
    }
    __device__ __host__ Type operator[](int i) const {
        printf("Calling CONST []\n");
        return data[i];
    }
    __device__ __host__ Type &operator[](int i) {
        printf("Calling NON CONST []\n");
        return data[i];
    }
    __device__ __host__ void fill(const Type &v) {
        for (size_t i = 0; i < len; ++i) data[i] = v;
    }
    size_t len;
    Type *data;
};

__global__ void kernel_bug(size_t bytesPerBlock) {
    // This is a test write to show that filling the memory
    // does not produce any error
    for (size_t i = 0; i < bytesPerBlock; ++i) {
        totalSharedMemory[i] = i % ('z' - 'a' + 1) + 'a';
        printf("[%p] %c\n", totalSharedMemory + i, totalSharedMemory[i]);
    }
    // 200 / 8 = 25 so should be aligned
    array<double> X(2, (double *)(totalSharedMemory + 200));
    X[0] = 2;
    X[1] = 4;
    printf("Fill with [] ok\n");
    X.data[0] = 1;
    X.data[1] = 0;
    printf("Fill with raw ok\n");
    X.fill(0); // Crash here
    printf("Fill with method ok\n");
}

int main(int argc, char **argv) {
    // Total memory required
    size_t bytesPerBlock = 686; // Big enough for 85 doubles
    kernel_bug<<<1, 1, bytesPerBlock>>>(bytesPerBlock);
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed with error:\n\t%s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
EDIT: also tested with CUDA 4.2, and the problem is still there.