Following Paul R's comments once more, I have improved my code. The alternative approach is now faster than the standard (zero-padded) one. For step #1 and step #3 I have left the other solutions I tried commented out in the script below; they turned out to be slower than, or at most as fast as, the uncommented ones. With a view to a simpler future CUDA parallelization, I have also favored non-nested for loops. I am not yet using multithreading for FFTW.
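For reference, the three steps implement the usual splitting of the zero-padded DFT output index as m = pK + k, with p = 0, ..., N-1 and k = 0, ..., K-1:

X[pK + k] = \sum_{n=0}^{N-1} x[n]\, e^{-2\pi i (pK + k) n / (NK)} = \sum_{n=0}^{N-1} \left( x[n]\, e^{-2\pi i k n / (NK)} \right) e^{-2\pi i p n / N}

Step #1 multiplies x[n] by the twiddle factors e^{-2\pi i k n / (NK)}, step #2 performs K independent length-N FFTs through fftw_plan_many_dft, and step #3 reorders the results from (k, p) to (p, k) order. Here is the full C++ script: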
#include <stdio.h>
#include <stdlib.h> /* srand, rand */
#include <string.h> /* memcpy */
#include <time.h> /* time */
#include <math.h>
#include <fstream>
#include <omp.h>
#include <fftw3.h>
#include "TimingCPU.h"
#define PI_d 3.141592653589793
/******************/
/* STEP #1 ON CPU */
/******************/
void step1CPU(fftw_complex * __restrict h_xpruning, const fftw_complex * __restrict h_x, const int N, const int K) {
// double factor = -2. * PI_d / (K * N);
// int n;
// omp_set_nested(1);
//#pragma omp parallel for private(n) num_threads(4)
// for (int k = 0; k < K; k++) {
// double arg1 = factor * k;
//#pragma omp parallel for num_threads(4)
// for (n = 0; n < N; n++) {
// double arg = arg1 * n;
// double cosarg = cos(arg);
// double sinarg = sin(arg);
// h_xpruning[k * N + n][0] = h_x[n][0] * cosarg - h_x[n][1] * sinarg;
// h_xpruning[k * N + n][1] = h_x[n][0] * sinarg + h_x[n][1] * cosarg;
// }
// }
//double factor = -2. * PI_d / (K * N);
//int k;
//omp_set_nested(1);
//#pragma omp parallel for private(k) num_threads(4)
//for (int n = 0; n < N; n++) {
// double arg1 = factor * n;
// #pragma omp parallel for num_threads(4)
// for (k = 0; k < K; k++) {
// double arg = arg1 * k;
// double cosarg = cos(arg);
// double sinarg = sin(arg);
// h_xpruning[k * N + n][0] = h_x[n][0] * cosarg - h_x[n][1] * sinarg;
// h_xpruning[k * N + n][1] = h_x[n][0] * sinarg + h_x[n][1] * cosarg;
// }
//}
//double factor = -2. * PI_d / (K * N);
//for (int k = 0; k < K; k++) {
// double arg1 = factor * k;
// for (int n = 0; n < N; n++) {
// double arg = arg1 * n;
// double cosarg = cos(arg);
// double sinarg = sin(arg);
// h_xpruning[k * N + n][0] = h_x[n][0] * cosarg - h_x[n][1] * sinarg;
// h_xpruning[k * N + n][1] = h_x[n][0] * sinarg + h_x[n][1] * cosarg;
// }
//}
//double factor = -2. * PI_d / (K * N);
//for (int n = 0; n < N; n++) {
// double arg1 = factor * n;
// for (int k = 0; k < K; k++) {
// double arg = arg1 * k;
// double cosarg = cos(arg);
// double sinarg = sin(arg);
// h_xpruning[k * N + n][0] = h_x[n][0] * cosarg - h_x[n][1] * sinarg;
// h_xpruning[k * N + n][1] = h_x[n][0] * sinarg + h_x[n][1] * cosarg;
// }
//}
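// Chosen solution: one flattened loop over all K * N elements (row = k, col = n), which should also map
// directly to a single CUDA kernel later; it computes h_xpruning[k * N + n] = h_x[n] * exp(-2 * pi * i * k * n / (K * N)).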
double factor = -2. * PI_d / (K * N);
#pragma omp parallel for num_threads(8)
for (int n = 0; n < K * N; n++) {
int row = n / N;
int col = n % N;
double arg = factor * row * col;
double cosarg = cos(arg);
double sinarg = sin(arg);
h_xpruning[n][0] = h_x[col][0] * cosarg - h_x[col][1] * sinarg;
h_xpruning[n][1] = h_x[col][0] * sinarg + h_x[col][1] * cosarg;
}
}
/******************/
/* STEP #3 ON CPU */
/******************/
void step3CPU(fftw_complex * __restrict h_xhatpruning, const fftw_complex * __restrict h_xhatpruning_temp, const int N, const int K) {
//int k;
//omp_set_nested(1);
//#pragma omp parallel for private(k) num_threads(4)
//for (int p = 0; p < N; p++) {
// #pragma omp parallel for num_threads(4)
// for (k = 0; k < K; k++) {
// h_xhatpruning[p * K + k][0] = h_xhatpruning_temp[p + k * N][0];
// h_xhatpruning[p * K + k][1] = h_xhatpruning_temp[p + k * N][1];
// }
//}
//int p;
//omp_set_nested(1);
//#pragma omp parallel for private(p) num_threads(4)
//for (int k = 0; k < K; k++) {
// #pragma omp parallel for num_threads(4)
// for (p = 0; p < N; p++) {
// h_xhatpruning[p * K + k][0] = h_xhatpruning_temp[p + k * N][0];
// h_xhatpruning[p * K + k][1] = h_xhatpruning_temp[p + k * N][1];
// }
//}
//for (int p = 0; p < N; p++) {
// for (int k = 0; k < K; k++) {
// h_xhatpruning[p * K + k][0] = h_xhatpruning_temp[p + k * N][0];
// h_xhatpruning[p * K + k][1] = h_xhatpruning_temp[p + k * N][1];
// }
//}
//for (int k = 0; k < K; k++) {
// for (int p = 0; p < N; p++) {
// h_xhatpruning[p * K + k][0] = h_xhatpruning_temp[p + k * N][0];
// h_xhatpruning[p * K + k][1] = h_xhatpruning_temp[p + k * N][1];
// }
//}
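// Chosen solution: one flattened loop performing the transposition from the K x N layout of
// h_xhatpruning_temp (batch index k, frequency index p) to the N x K layout of h_xhatpruning.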
#pragma omp parallel for num_threads(8)
for (int p = 0; p < K * N; p++) {
int col = p % N;
int row = p / N; // dividing by N so that row spans the K batches while col spans the N frequencies
h_xhatpruning[col * K + row][0] = h_xhatpruning_temp[col + row * N][0];
h_xhatpruning[col * K + row][1] = h_xhatpruning_temp[col + row * N][1];
}
//for (int p = 0; p < N; p += 2) {
// for (int k = 0; k < K; k++) {
// for (int p0 = 0; p0 < 2; p0++) {
// h_xhatpruning[(p + p0) * K + k][0] = h_xhatpruning_temp[(p + p0) + k * N][0];
// h_xhatpruning[(p + p0) * K + k][1] = h_xhatpruning_temp[(p + p0) + k * N][1];
// }
// }
//}
}
/********/
/* MAIN */
/********/
int main() {
int N = 10;
int K = 100000;
// --- CPU memory allocations
fftw_complex *h_x = (fftw_complex *)malloc(N * sizeof(fftw_complex));
fftw_complex *h_xzp = (fftw_complex *)calloc(N * K, sizeof(fftw_complex));
fftw_complex *h_xpruning = (fftw_complex *)malloc(N * K * sizeof(fftw_complex));
fftw_complex *h_xhatpruning = (fftw_complex *)malloc(N * K * sizeof(fftw_complex));
fftw_complex *h_xhatpruning_temp = (fftw_complex *)malloc(N * K * sizeof(fftw_complex));
fftw_complex *h_xhat = (fftw_complex *)malloc(N * K * sizeof(fftw_complex));
//double2 *h_xhatGPU = (double2 *)malloc(N * K * sizeof(double2));
// --- Random number generation of the data sequence on the CPU - moving the data from CPU to GPU
srand(time(NULL));
for (int k = 0; k < N; k++) {
h_x[k][0] = (double)rand() / (double)RAND_MAX;
h_x[k][1] = (double)rand() / (double)RAND_MAX;
}
//gpuErrchk(cudaMemcpy(d_x, h_x, N * sizeof(double2), cudaMemcpyHostToDevice));
memcpy(h_xzp, h_x, N * sizeof(fftw_complex));
// --- FFTW and cuFFT plans
fftw_plan h_plan_zp = fftw_plan_dft_1d(N * K, h_xzp, h_xhat, FFTW_FORWARD, FFTW_ESTIMATE);
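// Batched plan for step #2: K independent length-N complex transforms, unit stride, distance N between consecutive batches.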
fftw_plan h_plan_pruning = fftw_plan_many_dft(1, &N, K, h_xpruning, NULL, 1, N, h_xhatpruning_temp, NULL, 1, N, FFTW_FORWARD, FFTW_ESTIMATE);
double totalTimeCPU = 0., totalTimeGPU = 0.;
double partialTimeCPU, partialTimeGPU;
/****************************/
/* STANDARD APPROACH ON CPU */
/****************************/
printf("Number of processors available = %i\n", omp_get_num_procs());
printf("Number of threads = %i\n", omp_get_max_threads());
TimingCPU timerCPU;
timerCPU.StartCounter();
fftw_execute(h_plan_zp);
printf("\nStadard on CPU: \t \t %f\n", timerCPU.GetCounter());
/******************/
/* STEP #1 ON CPU */
/******************/
timerCPU.StartCounter();
step1CPU(h_xpruning, h_x, N, K);
partialTimeCPU = timerCPU.GetCounter();
totalTimeCPU = totalTimeCPU + partialTimeCPU;
printf("\nOptimized first step CPU: \t %f\n", totalTimeCPU);
/******************/
/* STEP #2 ON CPU */
/******************/
timerCPU.StartCounter();
fftw_execute(h_plan_pruning);
partialTimeCPU = timerCPU.GetCounter();
totalTimeCPU = totalTimeCPU + partialTimeCPU;
printf("Optimized second step CPU: \t %f\n", timerCPU.GetCounter());
/******************/
/* STEP #3 ON CPU */
/******************/
timerCPU.StartCounter();
step3CPU(h_xhatpruning, h_xhatpruning_temp, N, K);
partialTimeCPU = timerCPU.GetCounter();
totalTimeCPU = totalTimeCPU + partialTimeCPU;
printf("Optimized third step CPU: \t %f\n", partialTimeCPU);
printf("Total time CPU: \t \t %f\n", totalTimeCPU);
double rmserror = 0., norm = 0.;
for (int n = 0; n < N; n++) {
rmserror = rmserror + (h_xhatpruning[n][0] - h_xhat[n][0]) * (h_xhatpruning[n][0] - h_xhat[n][0]) + (h_xhatpruning[n][1] - h_xhat[n][1]) * (h_xhatpruning[n][1] - h_xhat[n][1]);
norm = norm + h_xhat[n][0] * h_xhat[n][0] + h_xhat[n][1] * h_xhat[n][1];
}
printf("\nrmserror %f\n", 100. * sqrt(rmserror / norm));
fftw_destroy_plan(h_plan_zp);
fftw_destroy_plan(h_plan_pruning);
free(h_x); free(h_xzp); free(h_xpruning);
free(h_xhatpruning); free(h_xhatpruning_temp); free(h_xhat);
return 0;
}
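The rmserror check above compares only the first N bins of the two spectra. To validate the reordering over the whole length N * K spectrum, a small helper along the following lines could be added and called after step #3 (a sketch; rmsErrorAllBins is a new name introduced here, and it assumes both arrays hold N * K fftw_complex values as in main() above):

// Relative RMS error (in percent) over all N * K bins, instead of the first N only.
double rmsErrorAllBins(const fftw_complex *ref, const fftw_complex *test, const int totalSize) {
    double err = 0., norm = 0.;
    for (int m = 0; m < totalSize; m++) {
        double dre = test[m][0] - ref[m][0];
        double dim = test[m][1] - ref[m][1];
        err  += dre * dre + dim * dim;
        norm += ref[m][0] * ref[m][0] + ref[m][1] * ref[m][1];
    }
    return 100. * sqrt(err / norm);
}

// Usage in main(), after step #3:
//     printf("rmserror over all bins %f\n", rmsErrorAllBins(h_xhat, h_xhatpruning, N * K));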
For the case

N = 10
K = 100000

my timings are the following:
Standard on CPU: 23.895417
Optimized first step CPU: 4.472087
Optimized second step CPU: 4.926603
Optimized third step CPU: 2.394958
Total time CPU: 11.793648
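As mentioned, I am not yet using multithreading for FFTW. A minimal sketch of how the plans could be made multithreaded, assuming FFTW is built with thread support and the program is linked against libfftw3_threads (e.g. with -lfftw3_threads), would be:

// Sketch only: enable FFTW's internal multithreading before creating the plans.
// makeThreadedZeroPaddedFFT is a hypothetical helper, not part of the code above.
#include <fftw3.h>
#include <omp.h>

void makeThreadedZeroPaddedFFT(fftw_complex *h_xzp, fftw_complex *h_xhat, const int N, const int K) {
    if (fftw_init_threads() == 0) return;              // FFTW thread support unavailable
    fftw_plan_with_nthreads(omp_get_max_threads());    // all plans created afterwards use this many threads
    fftw_plan h_plan_zp = fftw_plan_dft_1d(N * K, h_xzp, h_xhat, FFTW_FORWARD, FFTW_ESTIMATE);
    fftw_execute(h_plan_zp);
    fftw_destroy_plan(h_plan_zp);
    fftw_cleanup_threads();
}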