
This is the standard way to perform timing in CUDA:

cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);

// Something to be timed

cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);

cudaEventElapsedTime(&time, start, stop);
printf ("Time: %f ms\n", time);
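
For reference, a minimal self-contained program built around this pattern could look as follows; the kernel and problem size are only illustrative placeholders for the "something to be timed", not part of the original question.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative kernel standing in for "something to be timed"
__global__ void scaleKernel(float * __restrict__ d_data, const int N) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N) d_data[tid] *= 2.0f;
}

int main() {
    const int N = 1 << 20;
    float *d_data;
    cudaMalloc(&d_data, N * sizeof(float));

    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    scaleKernel<<<(N + 255) / 256, 256>>>(d_data, N);   // something to be timed
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    cudaEventElapsedTime(&time, start, stop);
    printf("Time: %f ms\n", time);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_data);

    return 0;
}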

In the CUDA simpleP2P (Peer-To-Peer) example, timing is performed this way:

cudaEvent_t start, stop;
float time;
int eventflags = cudaEventBlockingSync;
cudaEventCreateWithFlags(&start,eventflags);
cudaEventCreateWithFlags(&stop,eventflags);

cudaEventRecord(start,0);

// Something to be timed

cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);

My questions are:

  1. Why, for the P2P example, have the events been created by cudaEventCreateWithFlags with the cudaEventBlockingSync flag?
  2. Generally speaking, is it needed in all multi-GPU applications (including peer-to-peer memory copy timings)?

Thanks.


1 Answer


Almost three years later, I am answering my own question.

To do so, I will consider the example in Concurrency in CUDA multi-GPU executions, where it was underlined how the use of asynchronous copies enables true multi-GPU concurrency. In particular, I will consider Test Case #8 of that post.

For the sake of clarity, the full code and the profiler timeline of Test Case #8 are reported here.

#include "Utilities.cuh"
#include "InputOutput.cuh"

#define BLOCKSIZE 128

/*******************/
/* KERNEL FUNCTION */
/*******************/
template<class T>
__global__ void kernelFunction(T * __restrict__ d_data, const unsigned int NperGPU) {

    const int tid = threadIdx.x + blockIdx.x * blockDim.x;

    if (tid < NperGPU) for (int k = 0; k < 1000; k++) d_data[tid] = d_data[tid] * d_data[tid];

}

/******************/
/* PLAN STRUCTURE */
/******************/
// --- Async
template<class T>
struct plan {
    T               *d_data;
};

/*****************/
/* PLAN CREATION */
/*****************/
template<class T>
void createPlan(plan<T>& plan, unsigned int NperGPU, unsigned int gpuID) {

    // --- Device allocation
    gpuErrchk(cudaSetDevice(gpuID));
    gpuErrchk(cudaMalloc(&(plan.d_data), NperGPU * sizeof(T)));
}

/********/
/* MAIN */
/********/
int main() {

    const int numGPUs   = 4;
    const int NperGPU   = 500000;
    const int N         = NperGPU * numGPUs;

    plan<double> plan[numGPUs];
    for (int k = 0; k < numGPUs; k++) createPlan(plan[k], NperGPU, k);

    // --- "Breadth-first" approach - async
    double *inputMatrices;   gpuErrchk(cudaMallocHost(&inputMatrices, N * sizeof(double)));
    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        gpuErrchk(cudaMemcpyAsync(plan[k].d_data, inputMatrices + k * NperGPU, NperGPU * sizeof(double), cudaMemcpyHostToDevice));
    }

    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        kernelFunction<<<iDivUp(NperGPU, BLOCKSIZE), BLOCKSIZE>>>(plan[k].d_data, NperGPU);
    }

    for (int k = 0; k < numGPUs; k++) {
        gpuErrchk(cudaSetDevice(k));
        gpuErrchk(cudaMemcpyAsync(inputMatrices + k * NperGPU, plan[k].d_data, NperGPU * sizeof(double), cudaMemcpyDeviceToHost));
    }

    gpuErrchk(cudaDeviceReset());
}

[Profiler timeline of Test Case #8]
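
Note that gpuErrchk and iDivUp are provided by Utilities.cuh, which is not reproduced here; a minimal sketch of what they might look like (an assumption added for completeness, not the original header) is:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Round-up integer division, used for grid sizing (assumed behavior of iDivUp)
inline int iDivUp(int a, int b) { return (a + b - 1) / b; }

// Check CUDA API return codes and abort on failure (assumed behavior of gpuErrchk)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit(code);
    }
}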

Timing the asynchronous copies - concurrency is destroyed

Now, let us begin by timing the asynchronous copies. One possible way of doing so is by using the following snippet:

float time[numGPUs];
cudaEvent_t start[numGPUs], stop[numGPUs];

// --- "Breadth-first" approach - async
for (int k = 0; k < numGPUs; k++) {
    gpuErrchk(cudaSetDevice(k));
    cudaEventCreateWithFlags(&start[k], cudaEventBlockingSync);
    cudaEventCreateWithFlags(&stop[k], cudaEventBlockingSync);
    cudaEventRecord(start[k], 0);
    gpuErrchk(cudaMemcpyAsync(plan[k].d_data, inputMatrices + k * NperGPU, NperGPU * sizeof(double), cudaMemcpyHostToDevice));
    cudaEventRecord(stop[k], 0);
    cudaEventSynchronize(stop[k]);
    cudaEventElapsedTime(&time[k], start[k], stop[k]);
}
for (int k = 0; k < numGPUs; k++) printf("Elapsed time:  %3.1f ms \n", time[k]);

Unfortunately, this way of timing destroys concurrency, as can be seen from the profiler timeline below:

[Profiler timeline: concurrency destroyed when timing inside the loop]
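
The serialization comes from the position of cudaEventSynchronize(stop[k]): it blocks the host thread until the copy on GPU k has completed, so the copy for GPU k + 1 is not even issued until then. The following single-threaded rearrangement, which defers all synchronization to a second loop, is only a sketch to make that blocking point explicit; the OpenMP approach below is the variant verified with the profiler.

// Issue all the copies first, recording start/stop events on each device's default stream
for (int k = 0; k < numGPUs; k++) {
    gpuErrchk(cudaSetDevice(k));
    cudaEventCreateWithFlags(&start[k], cudaEventBlockingSync);
    cudaEventCreateWithFlags(&stop[k], cudaEventBlockingSync);
    cudaEventRecord(start[k], 0);
    gpuErrchk(cudaMemcpyAsync(plan[k].d_data, inputMatrices + k * NperGPU, NperGPU * sizeof(double), cudaMemcpyHostToDevice));
    cudaEventRecord(stop[k], 0);
}
// Only now block the host, one device at a time, and read back the timings
for (int k = 0; k < numGPUs; k++) {
    gpuErrchk(cudaSetDevice(k));
    cudaEventSynchronize(stop[k]);
    cudaEventElapsedTime(&time[k], start[k], stop[k]);
    printf("Elapsed time:  %3.1f ms \n", time[k]);
}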

Timing the asynchronous copies - concurrency is preserved

To avoid this problem, a possibility is to launch the GPU tasks as OpenMP threads as follows:

int maxNumProcessors = omp_get_max_threads();
std::cout << "Maximum number of CPU threads = " << maxNumProcessors << std::endl;

// --- "Breadth-first" approach - async
omp_set_num_threads(numGPUs);
#pragma omp parallel
{
    unsigned int k = omp_get_thread_num();
    gpuErrchk(cudaSetDevice(k));
    cudaEventCreateWithFlags(&start[k], cudaEventBlockingSync);
    cudaEventCreateWithFlags(&stop[k], cudaEventBlockingSync);
    cudaEventRecord(start[k], 0);
    gpuErrchk(cudaMemcpyAsync(plan[k].d_data, inputMatrices + k * NperGPU, NperGPU * sizeof(double), cudaMemcpyHostToDevice));
    cudaEventRecord(stop[k], 0);
    cudaEventSynchronize(stop[k]);
    cudaEventElapsedTime(&time[k], start[k], stop[k]);
    printf("Thread nr. %i; Elapsed time:  %3.1f ms \n", k, time[k]);
}

As can be seen from the profiler timeline, concurrency is preserved.

[Profiler timeline: concurrency preserved with OpenMP threads]
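
Note that the OpenMP variants need #include <omp.h> and a host compiler with OpenMP enabled; with nvcc this is typically done by forwarding the host compiler's OpenMP flag, e.g. -Xcompiler -fopenmp when gcc is the host compiler (an assumption about the build setup).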

Timing the kernel launches - concurrency is destroyed

The same happens when timing the kernel launches. Using the following snippet, concurrency is destroyed.

for (int k = 0; k < numGPUs; k++) {
    gpuErrchk(cudaSetDevice(k));
    cudaEventCreateWithFlags(&start[k], cudaEventBlockingSync);
    cudaEventCreateWithFlags(&stop[k], cudaEventBlockingSync);
    cudaEventRecord(start[k], 0);
    kernelFunction<<<iDivUp(NperGPU, BLOCKSIZE), BLOCKSIZE>>>(plan[k].d_data, NperGPU);
    cudaEventRecord(stop[k], 0);
    cudaEventSynchronize(stop[k]);
    cudaEventElapsedTime(&time[k], start[k], stop[k]);
}
for (int k = 0; k < numGPUs; k++) printf("Elapsed time:  %3.1f ms \n", time[k]);

[Profiler timeline: concurrency destroyed when timing kernel launches inside the loop]

Timing the kernel launches - concurrency is preserved

Opposite to the above, using OpenMP, concurrency is preserved.

int maxNumProcessors = omp_get_max_threads();
std::cout << "Maximum number of CPU threads = " << maxNumProcessors << std::endl;

omp_set_num_threads(numGPUs);
#pragma omp parallel
{
    unsigned int k = omp_get_thread_num();
    gpuErrchk(cudaSetDevice(k));
    cudaEventCreateWithFlags(&start[k], cudaEventBlockingSync);
    cudaEventCreateWithFlags(&stop[k], cudaEventBlockingSync);
    cudaEventRecord(start[k], 0);
    kernelFunction<<<iDivUp(NperGPU, BLOCKSIZE), BLOCKSIZE>>>(plan[k].d_data, NperGPU);
    cudaEventRecord(stop[k], 0);
    cudaEventSynchronize(stop[k]);
    cudaEventElapsedTime(&time[k], start[k], stop[k]);
    printf("Thread nr. %i; Elapsed time:  %3.1f ms \n", k, time[k]);
}

[Profiler timeline: kernel concurrency preserved with OpenMP threads]

answered 2016-03-02T12:00:52.710