3

What is the difference between the following two functions?

// Version 1: volatile operands. Every reference to A and B compiles to an
// actual memory read or write (no register/cache reuse), so swaps become
// visible to other threads as they happen.
__device__ inline void comparator_volatile(volatile float &A, volatile float &B, uint dir) {
float t;
if ((A > B) == dir) {
    t = A;
    A = B;
    B = t;
  }
}

// Version 2: non-volatile operands plus a trailing __threadfence(). The
// compiler may keep A and B in registers/cache during the swap; the fence
// only orders this thread's eventual writes for other threads -- it does not
// force the reads of A and B to come from memory. Note the fence executes
// unconditionally: it sits outside the if, so it runs even when no swap
// occurred.
__device__ inline void comparator(float &A, float &B, uint dir) {
float t;
if ((A > B) == dir) {
    t = A;
    A = B;
    B = t;
  }
  __threadfence();
}

Could anyone help me?

I implemented bitonic sort in several different versions based on the CUDA SDK sample. For the ATOMIC version (bitonicSortAtomic), I tried to use __threadfence() in __syncblocks_atomic to maintain memory consistency, but it doesn't work (the output is incorrect). I have to call comparator_volatile instead of comparator to get the correct result. Any idea why? The bitonic sort benchmark:

// (C) Copyright 2013, University of Illinois. All Rights Reserved
#include <stdlib.h>
#include <stdio.h>
#include "parboil.h"

#define THREADS 256
#define BLOCKS 32
#define NUM_VALS 2*THREADS*BLOCKS

// Arrival counter for the software grid-wide barrier. Declared volatile so
// the spinning thread re-reads it from memory on every loop iteration
// instead of reusing a cached value.
__device__ volatile int mutex = 0;

// Lock-free, atomic-counter software barrier across all blocks of the grid.
//
// `goal` must be the *cumulative* number of barrier arrivals expected so
// far: the counter is never reset, so each caller advances its goal by the
// number of blocks on every use (see bitonicSortAtomic's goalVal).
//
// WARNING: this pattern only works when every block of the grid is resident
// on the GPU at the same time; if more blocks are launched than can be
// co-scheduled, blocks that never get scheduled can never arrive and the
// spin loop deadlocks.
__device__ inline void __syncblocks_atomic(int goal) {
    __syncthreads();
    // Publish this block's prior global-memory writes before announcing
    // arrival, so data written before the barrier is visible to other
    // blocks after it.
    __threadfence();
    int tx = threadIdx.x;
    if (tx == 0) {
        // One arrival ticket per block.
        atomicAdd((int *)&mutex, 1);
        // BUG FIX: the original spun on `g_mutex`, which is not declared
        // anywhere in this file (the counter is named `mutex`), so the code
        // could not compile as posted.
        while (mutex != goal) {}
    }
    __syncthreads();
    // NOTE(review): even with the fence above, data read after the barrier
    // must be accessed with volatile semantics (see comparator_volatile) or
    // a thread may consume stale values cached before the barrier.
}

// One compare-exchange step of the bitonic network (non-volatile operands).
// With dir == 1 the pair ends up ascending (A <= B), with dir == 0
// descending. The swap is not atomic with respect to other threads.
__device__ inline void comparator(float &A, float &B, uint dir) {
    const uint outOfOrder = (A > B);
    if (outOfOrder == dir) {
        const float swapped = A;
        A = B;
        B = swapped;
    }
}

// One compare-exchange step of the bitonic network, with volatile operands.
// With dir == 1 the pair ends up ascending (A <= B), with dir == 0
// descending.
//
// Because A and B are volatile, every reference compiles to a real memory
// read or write (no register/cache reuse), so values written by other
// threads are re-read and this thread's swap becomes visible promptly.
// The swap is still NOT atomic: a concurrent accessor can observe a
// half-completed exchange.
__device__ inline void comparator_volatile(volatile float &A, volatile float &B, uint dir) {
    float t;
    if ((A > B) == dir) {
        t = A;
        A = B;
        B = t;
    }
}

#ifdef NAIVE
// One bitonic comparator step per thread: thread tid handles the element
// pair (pos, pos + stride). `size` selects which direction the subsequence
// containing this pair is being sorted in; the host drives the outer
// (size, stride) loops with one launch per phase.
__global__ void bitonicSortNaive(float *src, int stride, int size) {
  const unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
  // Ascending when the size/2 bit of tid is clear, descending otherwise.
  const uint ascending = (tid & (size / 2)) == 0;
  // First index of this thread's pair: 2*tid minus the offset within the
  // current stride group.
  const unsigned int pos = (tid << 1) - (tid & (stride - 1));
  comparator(src[pos], src[pos + stride], ascending);
}
#endif

#ifdef ATOMIC
// Single-launch bitonic sort: each thread owns one comparator (one element
// pair) and the entire sorting network runs inside one kernel invocation,
// using __syncblocks_atomic as a software grid-wide barrier whenever a
// phase's compare distance crosses block boundaries.
//
// Expected launch: BLOCKS x THREADS threads with length == 2*THREADS*BLOCKS,
// so tid covers every pair exactly once per phase.
__global__ void bitonicSortAtomic(float *src, int length) {
  uint numBlocks = gridDim.x * gridDim.y * gridDim.z;
  uint goalVal = 0;  // cumulative barrier target; grows by numBlocks per use
  uint tid = threadIdx.x + blockDim.x * blockIdx.x;
  for(uint size=2; size<=length; size<<=1) {  // width of bitonic subsequences
    for(uint stride=size>>1; stride>0; stride=stride>>1) {  // compare distance
      // Sort direction of this thread's subsequence: ascending when the
      // size/2 bit of tid is clear.
      uint dir = (tid & (size / 2)) == 0;
      // First index of this thread's pair.
      uint pos = 2*tid - (tid & (stride - 1));
      // volatile accesses so values written by other blocks before the last
      // barrier are actually re-read from memory rather than cached.
      comparator_volatile(src[pos], src[pos+stride], dir);
      if(stride>THREADS || (stride==1 && size>=THREADS)) {
        // Pair may span data owned by other blocks: grid-wide barrier.
        // NOTE(review): the exact predicate (especially the stride==1 &&
        // size>=THREADS case) is inherited from the original code -- confirm
        // it against the data partitioning before reusing.
        goalVal += numBlocks;
        __syncblocks_atomic(goalVal);
      }
      else
        __syncthreads();  // pair stays inside this block's region
    } // end for stride
  } // end for size
}
#endif

// Host driver: initializes NUM_VALS floats, runs the selected bitonic-sort
// variant on the GPU, and copies the result back. Returns nonzero on any
// allocation or CUDA failure (the original ignored every error code, so a
// failed launch produced silent garbage).
int main() {
  printf("[BENCH] Bitonic Sort %d elements\n", NUM_VALS);
  printf("[BENCH] Xuhao Chen <cxh@illinois.edu>\n");
#ifdef NAIVE
  printf("[BENCH] Naive version\n");
#endif
#ifdef ATOMIC
  printf("[BENCH] Atomic Barrier\n");
#endif
  float *values = (float*) malloc( NUM_VALS * sizeof(float));
  if (values == NULL) {
    fprintf(stderr, "host allocation of %d floats failed\n", NUM_VALS);
    return EXIT_FAILURE;
  }
  // Fill the input buffer; array_init is presumably declared in parboil.h --
  // TODO confirm.
  array_init(values, NUM_VALS);
  float *dev_values = NULL;
  size_t size = NUM_VALS * sizeof(float);
  cudaError_t err = cudaMalloc((void**) &dev_values, size);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
    free(values);
    return EXIT_FAILURE;
  }
  err = cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy H2D: %s\n", cudaGetErrorString(err));
    cudaFree(dev_values);
    free(values);
    return EXIT_FAILURE;
  }
  dim3 blocks(BLOCKS,1);
  dim3 threads(THREADS,1);
  cudaDeviceSynchronize();

#ifdef NAIVE
  // Host-driven phases: one launch per (size, stride) step. Launches on the
  // same (default) stream execute in order, which provides the inter-phase
  // barrier the naive version needs.
  int j, k;
  for (k = 2; k <= NUM_VALS; k <<= 1) {
    for (j=k>>1; j>0; j=j>>1) {
      bitonicSortNaive<<<blocks, threads>>>(dev_values, j, k);
    }
  }
#endif

#ifdef ATOMIC
  // Single launch; the kernel synchronizes across blocks internally.
  bitonicSortAtomic<<<blocks, threads>>>(dev_values, NUM_VALS);
#endif

  // Launch-configuration errors surface via cudaGetLastError; in-kernel
  // faults surface at the following synchronize.
  err = cudaGetLastError();
  if (err == cudaSuccess)
    err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
    cudaFree(dev_values);
    free(values);
    return EXIT_FAILURE;
  }
  err = cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy D2H: %s\n", cudaGetErrorString(err));
    cudaFree(dev_values);
    free(values);
    return EXIT_FAILURE;
  }
  cudaFree(dev_values);
  free(values);
  return 0;
}

__syncblocks_atomic is a function that implements a global (inter-block) barrier. Since there is inter-block communication, I have to maintain data consistency.

4

1 回答 1

1

CUDA 编程指南指出:

如果位于全局或共享内存中的变量被声明为易失性,编译器假定它的值可以随时被另一个线程更改或使用,因此对该变量的任何引用都会编译为实际的内存读取或写入指令。

这基本上意味着内存将在您为变量赋值时立即刷新,并且在您尝试读取其值时直接从内存中获取(没有缓存)。

在您的第一个代码示例中,由于 A 和 B 都是易失性的,因此会生成 6 条实际内存指令。每次使用 A 或 B 时都进行一次读/写。好处是其他线程将能够在进行修改时更早地看到这些修改。缺点是执行会更慢,因为缓存将被禁用。

另一方面,在您的第二个代码示例中,GPU 被授权使用缓存来加速其执行,直到函数结束时,它被迫发出内存写入。如果 A 和 B 都已缓存,则仅发出 2 次内存写入。缺点是其他线程可能只能在栅栏之后看到更改的值。

您应该考虑的另一件事是操作不是原子的。如果其他线程在您的函数执行时尝试访问 A 和 B,它们可能会看到函数的部分执行,在这两种情况下。在第二个代码示例中,这不太可能发生,因为线程可能会使用其缓存值,并立即刷新最终值(无论如何,您不应该依赖它)。

此外, volatile 在同一warp中的线程之间作为 __threadfence() 的更快版本工作(因为 warp 中的线程同步运行)。

于 2013-08-09T20:42:29.783 回答