
I'm trying to implement an FIR (finite impulse response) filter in CUDA. My approach is quite simple and looks somewhat like this:

#include <cuda.h>

__global__ void filterData(const float *d_data,
                           const float *d_numerator, 
                           float *d_filteredData, 
                           const int numeratorLength,
                           const int filteredDataLength)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    float sum = 0.0f;

    if (i < filteredDataLength)
    {
        for (int j = 0; j < numeratorLength; j++)
        {
            // The first (numeratorLength-1) elements contain the filter state
            sum += d_numerator[j] * d_data[i + numeratorLength - j - 1];
        }

        // Write inside the bounds check so threads in a partial last block
        // don't write past the end of d_filteredData.
        d_filteredData[i] = sum;
    }
}

int main(void)
{
    // (Skipping error checks to make code more readable)

    int dataLength = 18042;
    int filteredDataLength = 16384;
    int numeratorLength = 1659;

    // Pointers to data, filtered data and filter coefficients
    // (Skipping how these are read into the arrays)
    float *h_data = new float[dataLength];
    float *h_filteredData = new float[filteredDataLength];
    float *h_filter = new float[numeratorLength];


    // Create device pointers
    float *d_data = nullptr;
    cudaMalloc((void **)&d_data, dataLength * sizeof(float));

    float *d_numerator = nullptr;
    cudaMalloc((void **)&d_numerator, numeratorLength * sizeof(float));

    float *d_filteredData = nullptr;
    cudaMalloc((void **)&d_filteredData, filteredDataLength * sizeof(float));


    // Copy data to device
    cudaMemcpy(d_data, h_data, dataLength * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_numerator, h_filter, numeratorLength * sizeof(float), cudaMemcpyHostToDevice);

    // Launch the kernel
    int threadsPerBlock = 256;
    int blocksPerGrid = (filteredDataLength + threadsPerBlock - 1) / threadsPerBlock;
    filterData<<<blocksPerGrid,threadsPerBlock>>>(d_data, d_numerator, d_filteredData, numeratorLength, filteredDataLength);

    // Copy results to host
    cudaMemcpy(h_filteredData, d_filteredData, filteredDataLength * sizeof(float), cudaMemcpyDeviceToHost);

    // Clean up
    cudaFree(d_data);
    cudaFree(d_numerator);
    cudaFree(d_filteredData);

    // Do stuff with h_filteredData...

    // Clean up some more
    delete [] h_data;
    delete [] h_filteredData;
    delete [] h_filter;
}

The filter works, but since I'm new to CUDA programming I'm not sure how to optimize it.

One small issue I see is that dataLength, filteredDataLength, and numeratorLength are not known in advance in the application where I intend to use the filter. Also, even though dataLength is a multiple of 32 in the code above, that is not guaranteed to be the case in the final application.

When I compare the code above with ArrayFire, my code takes about three times longer to execute.

Does anyone have any ideas on how to speed things up?

EDIT: Changed all occurrences of filterLength to numeratorLength.


3 Answers


I can suggest the following to speed up your code:

  1. Use shared memory: it is a small, cache-like memory, but much faster than global card memory. You can find more about it by looking up the __shared__ keyword in the CUDA documentation. For example, you can prefetch the filter numerator and big chunks of data into shared memory, which will significantly improve your performance. You need to pay extra attention to data alignment here, as it really matters and can slow down your code. (A minimal sketch of this idea follows the list.)
  2. Consider unrolling the for loop that accumulates the numerator sum. You can check the reduce-vector example in the CUDA documentation.
  3. You can also consider parallelizing the numerator loop itself, by adding an extra dimension (say "y") to your thread block. You would then need to make sum a shared vector of dimension numeratorLength. Check the reduce-vector example as well for how to quickly take the sum of that vector at the end.
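
For example, here is a minimal sketch of point 1, in the spirit of the OP's kernel. TILE_SIZE (the block size) and MAX_HALO (an assumed compile-time upper bound on numeratorLength - 1) are illustrative choices, and d_data is assumed to hold filteredDataLength + numeratorLength - 1 samples, as in the question (18042 = 16384 + 1659 - 1):

#define TILE_SIZE 256     // outputs per block == threads per block (assumed)
#define MAX_HALO  2048    // assumed upper bound on numeratorLength - 1

__global__ void filterDataShared(const float *d_data,
                                 const float *d_numerator,
                                 float *d_filteredData,
                                 const int numeratorLength,
                                 const int filteredDataLength)
{
    // TILE_SIZE outputs need TILE_SIZE + numeratorLength - 1 input samples.
    __shared__ float s_data[TILE_SIZE + MAX_HALO];

    const int blockStart = blockIdx.x * TILE_SIZE;   // first input sample this block needs
    const int i = blockStart + threadIdx.x;          // output sample this thread computes

    // Cooperatively stage the tile plus halo into shared memory.
    for (int k = threadIdx.x; k < TILE_SIZE + numeratorLength - 1; k += TILE_SIZE)
        if (blockStart + k < filteredDataLength + numeratorLength - 1)
            s_data[k] = d_data[blockStart + k];
    __syncthreads();

    if (i < filteredDataLength) {
        float sum = 0.0f;
        for (int j = 0; j < numeratorLength; j++)
            sum += d_numerator[j] * s_data[threadIdx.x + numeratorLength - j - 1];
        d_filteredData[i] = sum;
    }
}

Each block stages its input window once and every thread then reads from shared memory; with TILE_SIZE = 256 and MAX_HALO = 2048 the tile occupies about 9 KB of shared memory, which fits easily. The numerator coefficients could be staged the same way, or placed in __constant__ memory.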
Answered 2013-04-07T00:12:42.373

You are trying to compute the filter output by directly evaluating the 1D convolution through a CUDA kernel.

In the case when the filter impulse response is long, one thing you can do to evaluate the filtered output is to perform the computation directly in the frequency domain using the FFT. Below I report a sample code using CUDA Thrust and the cuFFT library. It is a direct translation of the Matlab-based example reported at

Low-Pass Filtering by FFT Convolution

Let me disclaim that this code admits some optimizations, but I preferred to leave it as it is so that it could be more easily compared to its Matlab counterpart.

#include <stdio.h>
#include <math.h>

#include <cufft.h>

#include <thrust/device_vector.h>
#include <thrust/sequence.h>

#define pi_f  3.14159265358979f                 // Greek pi in single precision

/****************/
/* SIN OPERATOR */
/****************/
class sin_op {

    float fk_, Fs_;

    public:

        sin_op(float fk, float Fs) { fk_ = fk; Fs_ = Fs; }

        __host__ __device__ float operator()(float x) const { return sin(2.f*pi_f*x*fk_/Fs_); }
};

/*****************/
/* SINC OPERATOR */
/*****************/
class sinc_op {

    float fc_, Fs_;

    public:

        sinc_op(float fc, float Fs) { fc_ = fc; Fs_ = Fs; }

        __host__ __device__ float operator()(float x) const 
        {
            if (x==0)   return (2.f*fc_/Fs_);
            else            return (2.f*fc_/Fs_)*sin(2.f*pi_f*fc_*x/Fs_)/(2.f*pi_f*fc_*x/Fs_);
        }
};

/********************/
/* HAMMING OPERATOR */
/********************/
class hamming_op {

    int L_;

    public:

        hamming_op(int L) { L_ = L; }

        __host__ __device__ float operator()(int x) const 
        {
            return 0.54f - 0.46f*cos(2.f*pi_f*x/(L_-1));
        }
};


/*********************************/
/* MULTIPLY CUFFTCOMPLEX NUMBERS */
/*********************************/
struct multiply_cufftComplex {
    __device__ cufftComplex operator()(const cufftComplex& a, const cufftComplex& b) const {
        cufftComplex r;
        r.x = a.x * b.x - a.y * b.y;
        r.y = a.x * b.y + a.y * b.x;
        return r;
    }
};

/********/
/* MAIN */
/********/
int main(){

    // Signal parameters:
    int M = 256;                            // signal length
    const int N = 4;
    float f[N] = { 440, 880, 1000, 2000 };              // frequencies
    float Fs = 5000.;                       // sampling rate

    // Generate a signal by adding up sinusoids:
    thrust::device_vector<float> d_x(M,0.f);            // pre-allocate 'accumulator'
    thrust::device_vector<float> d_n(M);                // discrete-time grid
    thrust::sequence(d_n.begin(), d_n.end(), 0, 1);

    thrust::device_vector<float> d_temp(M);
    for (int i=0; i<N; i++) { 
        float fk = f[i];
        thrust::transform(d_n.begin(), d_n.end(), d_temp.begin(), sin_op(fk,Fs));
        thrust::transform(d_temp.begin(), d_temp.end(), d_x.begin(), d_x.begin(), thrust::plus<float>()); 
    }

    // Filter parameters:
    int L = 257;                        // filter length
    float fc = 600.f;                   // cutoff frequency

    // Design the filter using the window method:
    thrust::device_vector<float> d_hsupp(L);            
    thrust::sequence(d_hsupp.begin(), d_hsupp.end(), -(L-1)/2, 1);
    thrust::device_vector<float> d_hideal(L);           
    thrust::transform(d_hsupp.begin(), d_hsupp.end(), d_hideal.begin(), sinc_op(fc,Fs));
    thrust::device_vector<float> d_l(L);                
    thrust::sequence(d_l.begin(), d_l.end(), 0, 1);
    thrust::device_vector<float> d_h(L);                
    thrust::transform(d_l.begin(), d_l.end(), d_h.begin(), hamming_op(L));
    // h is our filter
    thrust::transform(d_hideal.begin(), d_hideal.end(), d_h.begin(), d_h.begin(), thrust::multiplies<float>());  

    // --- Choose the next power of 2 greater than L+M-1
    int Nfft = pow(2,(ceil(log2((float)(L+M-1))))); // or 2^nextpow2(L+M-1)

    // Zero pad the signal and impulse response:
    thrust::device_vector<float> d_xzp(Nfft,0.f);
    thrust::device_vector<float> d_hzp(Nfft,0.f);
    thrust::copy(d_x.begin(), d_x.end(), d_xzp.begin());
    thrust::copy(d_h.begin(), d_h.end(), d_hzp.begin());

    // Transform the signal and the filter:
    cufftHandle plan;
    cufftPlan1d(&plan, Nfft, CUFFT_R2C, 1);
    thrust::device_vector<cufftComplex> d_X(Nfft/2+1);
    thrust::device_vector<cufftComplex> d_H(Nfft/2+1);
    cufftExecR2C(plan, (cufftReal*)thrust::raw_pointer_cast(d_xzp.data()), (cufftComplex*)thrust::raw_pointer_cast(d_X.data()));
    cufftExecR2C(plan, (cufftReal*)thrust::raw_pointer_cast(d_hzp.data()), (cufftComplex*)thrust::raw_pointer_cast(d_H.data()));

    thrust::device_vector<cufftComplex> d_Y(Nfft/2+1);
    thrust::transform(d_X.begin(), d_X.end(), d_H.begin(), d_Y.begin(), multiply_cufftComplex());  

    // Reuse the plan handle for the inverse transform; destroy the forward plan first.
    cufftDestroy(plan);
    cufftPlan1d(&plan, Nfft, CUFFT_C2R, 1);
    thrust::device_vector<float> d_y(Nfft);
    cufftExecC2R(plan, (cufftComplex*)thrust::raw_pointer_cast(d_Y.data()), (cufftReal*)thrust::raw_pointer_cast(d_y.data()));

    cufftDestroy(plan);

    getchar();

    return 0;
}
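
One caveat worth adding: cuFFT transforms are unnormalized, so the R2C/C2R round trip scales the result by Nfft. To match Matlab's ifft, divide d_y by Nfft right after the inverse transform; a one-line sketch, assuming <thrust/functional.h> is also included for the placeholders:

// cuFFT does not normalize, so the R2C -> C2R round trip multiplies the data by Nfft.
using namespace thrust::placeholders;
thrust::transform(d_y.begin(), d_y.end(), d_y.begin(), _1 / (float)Nfft);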
Answered 2014-05-19T15:33:00.893

Besides my other answer, which I expect to be more convenient for convolution kernels of long duration, below I report a different implementation that is more in line with the OP's initial attempt and that I expect to be more convenient for convolution kernels of short duration. Such an implementation is based on a hand-written kernel exploiting caching in shared memory. More details can be found in the book by D.B. Kirk and W.-m. W. Hwu,

Programming Massively Parallel Processors, Second Edition: A Hands-on Approach

#include <stdio.h>
#include <stdlib.h>

#include "TimingGPU.cuh"
#include "Utilities.cuh"

#define RG          10
#define BLOCKSIZE   8

/****************/
/* CPU FUNCTION */
/****************/
void h_convolution_1D(const float * __restrict__ h_Signal, const float * __restrict__ h_ConvKernel, float * __restrict__ h_Result_CPU, 
                      const int N, const int K) {

    for (int i = 0; i < N; i++) {

        float temp = 0.f;

        int N_start_point = i - (K / 2);

        for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
            temp += h_Signal[N_start_point+ j] * h_ConvKernel[j];
        }

        h_Result_CPU[i] = temp;
    }
}

/********************/
/* BASIC GPU KERNEL */
/********************/
__global__ void d_convolution_1D_basic(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU, 
                                       const int N, const int K) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard against the partial last block: N need not be a multiple of BLOCKSIZE.
    if (i >= N) return;

    float temp = 0.f;

    int N_start_point = i - (K / 2);

    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
        temp += d_Signal[N_start_point+ j] * d_ConvKernel[j];
    }

    d_Result_GPU[i] = temp;
}

/***************************/
/* GPU KERNEL WITH CACHING */
/***************************/
__global__ void d_convolution_1D_caching(const float * __restrict__ d_Signal, const float * __restrict__ d_ConvKernel, float * __restrict__ d_Result_GPU, 
                                         const int N, const int K) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    __shared__ float d_Tile[BLOCKSIZE];

    // Guard the load rather than returning early, so that every thread in the
    // block still reaches the barrier (N need not be a multiple of BLOCKSIZE).
    d_Tile[threadIdx.x] = (i < N) ? d_Signal[i] : 0.f;
    __syncthreads();

    float temp = 0.f;

    int N_start_point = i - (K / 2);

    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {

            if ((N_start_point + j >= blockIdx.x * blockDim.x) && (N_start_point + j < (blockIdx.x + 1) * blockDim.x))

                // --- The signal element is in the tile loaded in the shared memory
                temp += d_Tile[threadIdx.x + j - (K / 2)] * d_ConvKernel[j]; 

            else

                // --- The signal element is not in the tile loaded in the shared memory
                temp += d_Signal[N_start_point + j] * d_ConvKernel[j];

    }

    if (i < N) d_Result_GPU[i] = temp;
}

/********/
/* MAIN */
/********/
int main(){

    const int N = 15;           // --- Signal length
    const int K = 5;            // --- Convolution kernel length

    float *h_Signal         = (float *)malloc(N * sizeof(float));
    float *h_Result_CPU     = (float *)malloc(N * sizeof(float));
    float *h_Result_GPU     = (float *)malloc(N * sizeof(float));
    float *h_ConvKernel     = (float *)malloc(K * sizeof(float));

    float *d_Signal;        gpuErrchk(cudaMalloc(&d_Signal,     N * sizeof(float)));
    float *d_Result_GPU;    gpuErrchk(cudaMalloc(&d_Result_GPU, N * sizeof(float)));
    float *d_ConvKernel;    gpuErrchk(cudaMalloc(&d_ConvKernel, K * sizeof(float)));

    for (int i=0; i < N; i++) { h_Signal[i] = (float)(rand() % RG); }

    for (int i=0; i < K; i++) { h_ConvKernel[i] = (float)(rand() % RG); }

    gpuErrchk(cudaMemcpy(d_Signal,      h_Signal,       N * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_ConvKernel,  h_ConvKernel,   K * sizeof(float), cudaMemcpyHostToDevice));

    h_convolution_1D(h_Signal, h_ConvKernel, h_Result_CPU, N, K);

    d_convolution_1D_basic<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}

    printf("Test basic passed\n");

    d_convolution_1D_caching<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_Signal, d_ConvKernel, d_Result_GPU, N, K);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(h_Result_GPU, d_Result_GPU, N * sizeof(float), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) if (h_Result_CPU[i] != h_Result_GPU[i]) {printf("mismatch at %d, cpu: %f, gpu %f\n", i, h_Result_CPU[i], h_Result_GPU[i]); return 1;}

    printf("Test caching passed\n");

    return 0;
}
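
For short kernels such as K = 5 above, a further option in the spirit of the same book is to keep the coefficients in __constant__ memory, which is cached and broadcast when all threads of a warp read the same address. A minimal sketch, assuming a compile-time bound MAX_K on the kernel length:

#define MAX_K 64                                // assumed upper bound on K

__constant__ float c_ConvKernel[MAX_K];         // cached, broadcast on uniform reads

__global__ void d_convolution_1D_constant(const float * __restrict__ d_Signal, float * __restrict__ d_Result_GPU,
                                          const int N, const int K) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (i >= N) return;

    float temp = 0.f;

    int N_start_point = i - (K / 2);

    for (int j = 0; j < K; j++) if (N_start_point + j >= 0 && N_start_point + j < N) {
        temp += d_Signal[N_start_point + j] * c_ConvKernel[j];
    }

    d_Result_GPU[i] = temp;
}

// On the host, copy the coefficients once before launching:
// gpuErrchk(cudaMemcpyToSymbol(c_ConvKernel, h_ConvKernel, K * sizeof(float)));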
Answered 2015-08-25T13:24:24.297