1

我有一个从图像制作直方图的功能(给定的顺序版本(家庭作业))

// Image that will hold the rendered histogram bars (one BAR_WIDTH-wide column per bin).
CImg< unsigned char > histogramImage = CImg< unsigned char >(BAR_WIDTH * HISTOGRAM_SIZE, HISTOGRAM_SIZE, 1, 1);
// Host-side 256-entry (HISTOGRAM_SIZE) bin counters, zero-initialised.
unsigned int *histogram;
histogram = (unsigned int *)malloc(HISTOGRAM_SIZE * sizeof(unsigned int));
 memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(unsigned int));

// NOTE(review): this zeroing is redundant — the cudaMemcpy below overwrites
// every byte of gpuImage anyway.  Its return code is also unchecked.
cudaMemset(gpuImage, 0, grayImage.width() * grayImage.height() * sizeof(unsigned char));

// Upload the grayscale image to the device.
// NOTE(review): `grayImage` appears to be a CImg object passed where a raw
// pixel pointer is expected — presumably relying on CImg's implicit
// conversion to unsigned char*; confirm, or pass grayImage.data() explicitly.
cuda_err = cudaMemcpy(gpuImage, grayImage, grayImage.width() * grayImage.height() * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMemcpy" << std::endl;
   return -1;
}

// Device-side bin counters; must be zeroed before the kernel accumulates
// into them with atomicAdd.
unsigned int *gpuhistogram;
cuda_err = cudaMalloc((void **)(&gpuhistogram), HISTOGRAM_SIZE * sizeof(unsigned int));
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMalloc" << std::endl;
}
// NOTE(review): return code unchecked; execution also continues after a
// failed cudaMalloc above, which would make this write through a bad pointer.
cudaMemset (gpuhistogram, 0, HISTOGRAM_SIZE * sizeof(unsigned int));

// Launch wrapper: times and runs the histo kernel (defined below).
histogram1D(gpuImage, histogramImage, grayImage.width(), grayImage.height(), gpuhistogram, HISTOGRAM_SIZE, BAR_WIDTH, total, gridSize, blockSize);

// Copy the bin counts back to the host.
// NOTE(review): if the kernel LAUNCH failed (e.g. the grid dimension for an
// 8192x8192 image exceeds the device limit), no error is reported anywhere
// above and this copy "succeeds", returning the still-zeroed buffer — which
// matches the reported symptom.  Check cudaGetLastError() after the launch.
cuda_err = cudaMemcpy(histogram, gpuhistogram, HISTOGRAM_SIZE * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMemcpy" << std::endl;
}

那叫

// Launches the naive per-pixel histogram kernel over a width x height
// grayscale image already resident on the device, and reports its runtime.
//
// Parameters:
//   grayImage      device pointer to width*height 8-bit pixels
//   histogram      device pointer to the bin counters (pre-zeroed by caller)
//   grid_size /
//   block_size     2-D launch configuration chosen by the caller
//   (histogramImage, height, HISTOGRAM_SIZE, BAR_WIDTH and timer are unused
//   here; presumably consumed elsewhere in the assignment skeleton.)
void histogram1D(unsigned char *grayImage, unsigned char *histogramImage, const int width, const int height, unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, const unsigned int BAR_WIDTH, NSTimer &timer, dim3 grid_size, dim3 block_size) {

NSTimer kernelTime = NSTimer("kernelTime", false, false);

kernelTime.start();
histo <<< grid_size, block_size >>> (grayImage, histogram, width);
// A kernel launch returns no status: configuration errors (e.g. a grid
// dimension above 65535 on compute capability < 3.0 — exactly what an
// 8192x8192 image can trigger) only surface via cudaGetLastError().
// Without this check the kernel silently never runs and the histogram
// stays all zero.
cudaError_t launch_err = cudaGetLastError();
if (launch_err != cudaSuccess)
{
    std::cout << "ERROR: kernel launch failed: " << cudaGetErrorString(launch_err) << std::endl;
}
// The launch is asynchronous; synchronize so the timer measures execution.
cudaDeviceSynchronize();
kernelTime.stop();

cout << fixed << setprecision(6);
cout << "histogram1D (kernel): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}

核函数是

// Naive histogram kernel: each thread maps to one pixel (x, y) of a 2-D
// launch and atomically increments the global bin for that pixel's value.
__global__ void histo(unsigned char *inputImage, unsigned int *histogram, int width)
{

// Global 2-D pixel coordinates derived from the 2-D grid/block layout.
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);

// NOTE(review): there is no bounds guard (x < width && y < height) and the
// kernel does not even receive `height`, so any grid that overshoots the
// image reads out of bounds.  Separately, for an 8192x8192 image the
// required grid dimension can exceed the 65535-per-dimension limit of
// devices with compute capability < 3.0; such a launch fails without
// running, which is only visible via cudaGetLastError() — the likely
// reason the host-side histogram comes back all zero.
unsigned int index = static_cast< unsigned int >(inputImage[(y * width) + x]);
atomicAdd(&histogram[index],1);
}

我遇到的问题是,当我用 1024x1024 到 3543x2480 的图像调用它时,它可以工作。但是,我有一张 8192x8192 的图像,当函数返回时,* histogram 中的值仍然为 0。我的试验似乎表明它与 *gpuhistogram 的内存分配有关(unsigned int 不应该足够大?)因为这个工作的顺序版本。如何解决这个问题?有任何想法吗?

4

2 回答 2

0
  1. 检查你的卡。来自维基百科:

    技术规格(按计算能力版本):线程块网格的最大维度 —— 计算能力 1.0–2.x 为 2 维,3.0/3.5 为 3 维;网格的 x、y 或 z 维度的最大值 —— 计算能力 1.0–2.x 为 65535,3.0/3.5 为 2³¹−1。

  2. 我怀疑您的直方图的性能会比 CPU 代码差,尝试使用共享内存之类的东西并假设 256 个值。诀窍是每个块使用 bin# of threads(每个块 256 个线程)。我不想破坏作者的收入,所以请参阅CUDA by Example 2010

于 2013-02-01T09:54:46.943 回答
0

只是想补充;按照米哈伊尔的回答,这就是我现在正在做的事情;

// Launches the shared-memory histogram kernel (grid-stride version) over a
// width x height grayscale device image and reports its runtime.
//
// Parameters:
//   grayImage      device pointer to width*height 8-bit pixels
//   histogram      device pointer to the 256 bin counters (pre-zeroed)
//   (histogramImage, HISTOGRAM_SIZE, BAR_WIDTH, timer, grid_size and
//   block_size are unused: the launch configuration is hard-coded below.)
void histogram1D(unsigned char *grayImage, unsigned char *histogramImage, const int width, const int height, unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, const unsigned int BAR_WIDTH, NSTimer &timer, dim3 grid_size, dim3 block_size) {

NSTimer kernelTime = NSTimer("kernelTime", false, false);


kernelTime.start();
// 256 threads per block is REQUIRED: the kernel keeps one shared-memory bin
// per thread.  15*2 blocks = 2x the multiprocessor count of the author's
// device; the grid-stride loop makes any block count correct.
histo <<< 15*2, 256 >>> (grayImage, histogram, width, height);
// The launch is asynchronous, so this sync is mandatory for timing: without
// it kernelTime only measures launch overhead, not the kernel itself.  The
// "faster" numbers seen without the sync were not real execution times.
cudaDeviceSynchronize();
kernelTime.stop();

cout << fixed << setprecision(6);
cout << "histogram1D (kernel): \t\t" << kernelTime.getElapsed()*1000 << " milliseconds." << endl;
}

内核代码;

// Shared-memory histogram kernel with a grid-stride loop.
// Preconditions: launched 1-D with blockDim.x == 256 (one thread per bin,
// matching temp[256] and the final merge), and `histogram` zeroed before
// the launch.  Works for any image size and any block count.
__global__ void histo(unsigned char *inputImage, unsigned int *histogram, int width, int height)
{
// Per-block private histogram; contention on shared-memory atomics is far
// cheaper than 256-way contention on global memory.
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;

__syncthreads();

int i = threadIdx.x + blockIdx.x * blockDim.x;
// BUG FIX: the stride must be the TOTAL number of launched threads,
// blockDim.x * gridDim.x.  The original used blockDim.y * gridDim.x, which
// for this 1-D launch equals just gridDim.x, so different threads walked
// overlapping pixel sequences and every bin was overcounted.
int offset = blockDim.x * gridDim.x;
while (i < width * height)
{
    atomicAdd(&temp[inputImage[i]], 1);
    i += offset;
}

// Wait for all partial counts, then merge this block's private histogram
// into the global one — a single global atomic per bin per block.
__syncthreads();
atomicAdd(&(histogram[threadIdx.x]), temp[threadIdx.x]);
}
于 2013-02-04T07:22:54.257 回答