-1

要求

我写信请求有关优化我的解决方案/方法 “CalculateConvolutionOutputTensor__im2col” 的指导。我希望获得帮助:确定超越我这种朴素方法的最佳策略;建立对任何相关 GPU 机制及其作用方式的直觉(例如 bank conflicts,即共享内存存储体冲突);并根据我可以调整的内容帮助解读上述性能剖析结果。

使用 GeForce 2080 Ti 时,该方法的第一次运行需要 0.774 秒。我已经包含了我编写的唯一 CUDA C++ 内核的 Nsight Compute 配置文件的屏幕截图:im2col。

在此处输入图像描述

我能做的事情

我可以让每个 GPU 线程访问共享内存而不是全局内存。我可以把 GPU “堆”上的标量变量先读入内核的“栈”(寄存器),而不是在每个线程、每次内核内 for 循环迭代中都对指针解引用。我可以把这些小参数放进 GPU 内存中的一个数组里,只传递指向该数组的单个指针。我也可以使用更复杂的 im2col 版本。

我尝试过的事情

我宁愿不使用 cuDNN 7.6.5;当我使用 cuDNN 7.6.5 并编写语句“cudnnCreate(&cudnnHandle);”时,Nsight Compute 建议方法 cuModuleGetFunction 返回 CUDA_ERROR_NOT_FOUND。

重建解决方案

我用来创建这个项目的过程是使用 Visual Studio Community 2019 创建一个新的 CUDA 10.2 Runtime 项目,将默认源文件重命名为“main.cu”,将所有内容替换为下面的第一个代码块,添加“CalculateConvolutionOutputTensor__im2col.h ” 到我的项目中,在下面添加第二个代码块,在我的项目中添加“CalculateConvolutionOutputTensor__im2col.cu”,在下面添加第三个代码块,并添加“cublas.lib;” 到 Project Properties -> Linker -> Input -> Additional Dependencies

主文件

// Allow use of cudaMalloc.
#include <cuda_runtime.h>

// Allow use of sqrtf.
#include <cmath>

// Allow use of printf.
#include <cstdio>

// Allow use of time(NULL) as a seed.
#include <ctime>

// Allow construction of a default_random_engine.
#include <random>

// Allow use of CalculateConvolutionOutputTensor__im2col.
#include "CalculateConvolutionOutputTensor__im2col.h"


// Uploads one host int to freshly allocated device memory.
// Returns the device pointer; the caller owns it and must cudaFree it.
// NOTE(review): each call costs one cudaMalloc plus one cudaMemcpy; packing
// all scalars into a single device array would cut this setup overhead.
static int* UploadIntToDevice(int value)
{
    int* devicePointer = nullptr;
    cudaMalloc(&devicePointer, sizeof(int));
    cudaMemcpy(devicePointer, &value, sizeof(int), cudaMemcpyHostToDevice);
    return devicePointer;
}

// Driver: builds a synthetic input tensor and a random filter tensor,
// runs the im2col-based convolution on the GPU, and prints the first
// 18 elements of the result as a sanity check.
int main()
{
    // --------------------------------------------------------------------------
    // Declare and define parameters of CalculateConvolutionOutputTensor__im2col.
    // --------------------------------------------------------------------------
    // Device buffer that receives the convolution result
    // (6*3 filters, 19x19 output positions, 4 images).
    float* convolutionOutputTensor;
    cudaMalloc(&convolutionOutputTensor, 6 * 3 * 19 * 19 * 4 * sizeof(float));

    // Elements in one filter: 3 channels x 590 x 590.
    int elementsInFilter = 3 * 590 * 590;

    int imagesInSubdivision = 4;

    // Scalar kernel parameters, each uploaded to its own device int
    // because the im2col kernel dereferences them on the GPU.
    int* channelsInFilter_GPU = UploadIntToDevice(3);
    int* widthOfFilter_GPU = UploadIntToDevice(590);
    int* heightOfOutputTensor_GPU = UploadIntToDevice(19);
    int* widthOfOutputTensor_GPU = UploadIntToDevice(19);

    int elementsInChannelOfOutputTensor_host = 19 * 19;
    int* elementsInChannelOfOutputTensor_GPU =
        UploadIntToDevice(elementsInChannelOfOutputTensor_host);

    int* channelsInFilter_times_elementsInChannelOfOutputTensor_GPU =
        UploadIntToDevice(3 * 19 * 19);

    int* elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor_GPU =
        UploadIntToDevice(3 * 590 * 19 * 19);

    // Synthetic input: 4 images, 3x608x608 each, values cycling over [0, 1).
    int elementsInInputTensor = 3 * 608 * 608 * 4;
    float* inputTensor_host = new float[elementsInInputTensor];
    for (int i = 0; i < elementsInInputTensor; ++i) {
        // Float literal avoids a silent promotion of the division to double.
        inputTensor_host[i] = ((float)(i % 255)) / 255.0f;
    }
    float* inputTensor_GPU;
    cudaMalloc(&inputTensor_GPU, elementsInInputTensor * sizeof(float));
    cudaMemcpy(
        inputTensor_GPU,
        inputTensor_host,
        elementsInInputTensor * sizeof(float),
        cudaMemcpyHostToDevice);
    delete[] inputTensor_host;

    int* horizontalFilterStride_GPU = UploadIntToDevice(1);
    int* channelsInImage_GPU = UploadIntToDevice(3);
    int* verticalFilterStride_GPU = UploadIntToDevice(1);
    int* elementsInCrossSectionOfImage_GPU = UploadIntToDevice(3 * 608);
    int* elementsInImage_GPU = UploadIntToDevice(3 * 608 * 608);

    int filters = 6 * 3;

    // Random filters: normal(0, 1) scaled by 1/sqrt(elements in the tensor).
    int elementsInFilterTensor = 6 * 3 * 3 * 590 * 590;
    float* filterTensor_host = new float[elementsInFilterTensor];
    std::default_random_engine randomNumberGenerator(time(NULL));
    std::normal_distribution<float> normalDistribution(0.0f, 1.0f);
    for (int i = 0; i < elementsInFilterTensor; ++i) {
        // sqrtf keeps the whole expression in single precision.
        filterTensor_host[i] =
            normalDistribution(randomNumberGenerator) / sqrtf((float)elementsInFilterTensor);
    }
    float* filterTensor_GPU;
    cudaMalloc(&filterTensor_GPU, elementsInFilterTensor * sizeof(float));
    cudaMemcpy(
        filterTensor_GPU,
        filterTensor_host,
        elementsInFilterTensor * sizeof(float),
        cudaMemcpyHostToDevice);
    delete[] filterTensor_host;

    // Elements written per image: filters x 19 x 19.
    int elementsInOutputSubtensor = 6 * 3 * 19 * 19;


    // -------------------------------------------------
    // Execute CalculateConvolutionOutputTensor__im2col.
    // -------------------------------------------------
    CalculateConvolutionOutputTensor__im2col(
        convolutionOutputTensor,
        elementsInFilter,
        elementsInChannelOfOutputTensor_host,
        imagesInSubdivision,
        channelsInFilter_GPU,
        widthOfFilter_GPU,
        heightOfOutputTensor_GPU,
        widthOfOutputTensor_GPU,
        elementsInChannelOfOutputTensor_GPU,
        channelsInFilter_times_elementsInChannelOfOutputTensor_GPU,
        elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor_GPU,
        inputTensor_GPU,
        horizontalFilterStride_GPU,
        channelsInImage_GPU,
        verticalFilterStride_GPU,
        elementsInCrossSectionOfImage_GPU,
        elementsInImage_GPU,
        filters,
        filterTensor_GPU,
        elementsInOutputSubtensor);

    cudaFree(channelsInFilter_GPU);
    cudaFree(widthOfFilter_GPU);
    cudaFree(heightOfOutputTensor_GPU);
    cudaFree(widthOfOutputTensor_GPU);
    cudaFree(elementsInChannelOfOutputTensor_GPU);
    cudaFree(channelsInFilter_times_elementsInChannelOfOutputTensor_GPU);
    cudaFree(elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor_GPU);
    cudaFree(inputTensor_GPU);
    cudaFree(horizontalFilterStride_GPU);
    cudaFree(channelsInImage_GPU);
    cudaFree(verticalFilterStride_GPU);
    cudaFree(elementsInCrossSectionOfImage_GPU);
    cudaFree(elementsInImage_GPU);
    cudaFree(filterTensor_GPU);

    // --------------------------------------------------
    // Make sure that convolutionOutputTensor is correct.
    // --------------------------------------------------
    float* convolutionOutputTensor_test = new float[6 * 3 * 19 * 19 * 4];
    // The blocking memcpy also surfaces any asynchronous kernel error.
    cudaError_t copyStatus = cudaMemcpy(
        convolutionOutputTensor_test,
        convolutionOutputTensor,
        6 * 3 * 19 * 19 * 4 * sizeof(float),
        cudaMemcpyDeviceToHost);
    if (copyStatus != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(copyStatus));
    }
    printf("convolutionOutputTensor_test: {");
    for (int i = 0; i < 18; ++i) {
        printf("%f, ", convolutionOutputTensor_test[i]);
    }
    printf("...}\n");
    delete[] convolutionOutputTensor_test;

    cudaFree(convolutionOutputTensor);

    return 0;
}

CalculateConvolutionOutputTensor__im2col.h

// Host-side entry point, defined in CalculateConvolutionOutputTensor__im2col.cu.
// For each image in the subdivision it rearranges the image into a column
// matrix on the GPU ("im2col") and multiplies filterTensor with that matrix
// via cuBLAS, writing one subtensor of convolutionOutputTensor_child per image.
// Convention: every int* parameter is a device pointer to a single scalar that
// the kernel dereferences; plain-int parameters are consumed on the host.
void CalculateConvolutionOutputTensor__im2col(
    float* convolutionOutputTensor_child,   // device: receives the convolution result
    int elementsInFilter_child,             // host: elements in one filter
    int elementsInChannelOfOutputTensor_host_child,  // host: output positions per channel
    int imagesInSubdivision_child,          // host: number of images to process
    int* channelsInFilter,                  // device scalar
    int* widthOfFilter,                     // device scalar
    int* heightOfOutputTensor,              // device scalar
    int* widthOfOutputTensor,               // device scalar
    int* elementsInChannelOfOutputTensor_GPU_child,  // device scalar
    int* channelsInFilter_times_elementsInChannelOfOutputTensor,  // device scalar (precomputed product)
    int* elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor,  // device scalar (precomputed product)
    float* inputTensor_child,               // device: packed input images
    int* horizontalFilterStride,            // device scalar
    int* channelsInImage,                   // device scalar
    int* verticalFilterStride,              // device scalar
    int* elementsInCrossSectionOfImage,     // device scalar
    int* elementsInImage,                   // device scalar
    int filters_child,                      // host: number of filters
    float* filterTensor,                    // device: packed filter weights
    int elementsInOutputSubtensor_child);   // host: output elements per image

CalculateConvolutionOutputTensor__im2col.cu

// Allow use of __global__.
#include <cuda_runtime.h>

// Allow declaration of cublasHandle.
#include "cublas_v2.h"

// Allow use of blockIdx.x, blockDim.x, and threadIdx.x.
#include <device_launch_parameters.h>


// Forward declaration of the im2col kernel (defined at the bottom of this
// file). Each thread handles one (channel, filter-x, filter-y) coordinate of
// the filter footprint and copies the matching input element for every output
// position into the column matrix col_child. All scalar parameters arrive as
// device pointers to single ints and are dereferenced inside the kernel.
__global__
void im2col(
    float* col_child,                       // device: destination column matrix
    int* channelsInFilter_child,            // device scalar
    int* widthOfFilter_child,               // device scalar
    int* heightOfOutputTensor_child,        // device scalar
    int* widthOfOutputTensor_child,         // device scalar
    int* elementsInChannelOfOutputTensor_child,  // device scalar
    int* channelsInFilter_times_elementsInChannelOfOutputTensor_child,  // device scalar
    int* elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor_child,  // device scalar
    float* inputTensor_child_child,         // device: packed input images
    int* horizontalFilterStride_child,      // device scalar
    int* channelsInImage_child,             // device scalar
    int* verticalFilterStride_child,        // device scalar
    int* elementsInCrossSectionOfImage_child,  // device scalar
    int* image_child,                       // device scalar: index of current image
    int* elementsInImage_child);            // device scalar


// Computes the convolution output for every image in the subdivision.
// Per image: (1) the im2col kernel rearranges the image into the column
// matrix col, then (2) cublasSgemm multiplies col by filterTensor and stores
// the product in that image's subtensor of convolutionOutputTensor_child.
// Every int* parameter is a device pointer to one scalar consumed by the
// kernel; plain-int parameters are host-side sizes.
void CalculateConvolutionOutputTensor__im2col(
    float* convolutionOutputTensor_child,
    int elementsInFilter_child,
    int elementsInChannelOfOutputTensor_host_child,
    int imagesInSubdivision_child,
    int* channelsInFilter,
    int* widthOfFilter,
    int* heightOfOutputTensor,
    int* widthOfOutputTensor,
    int* elementsInChannelOfOutputTensor_GPU_child,
    int* channelsInFilter_times_elementsInChannelOfOutputTensor,
    int* elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor,
    float* inputTensor_child,
    int* horizontalFilterStride,
    int* channelsInImage,
    int* verticalFilterStride,
    int* elementsInCrossSectionOfImage,
    int* elementsInImage,
    int filters_child,
    float* filterTensor,
    int elementsInOutputSubtensor_child)
{
    // -----------------------------------------
    // Define and declare parameters for im2col.
    // -----------------------------------------
    // One thread per filter element.
    // NOTE(review): 885 is not a multiple of the warp size (32); a multiple
    // of 32 (e.g. 896 plus an index guard in the kernel) would avoid a
    // partially idle final warp in every block.
    int threads_per_block_for_im2col = 885;
    int blocks_for_im2col =
        (elementsInFilter_child + threads_per_block_for_im2col - 1) / threads_per_block_for_im2col;

    // Declare col, the im2col scratch buffer; cublasSgemm below treats it as
    // an (elementsInChannelOfOutputTensor x elementsInFilter) column-major matrix.
    float* col;

    // Keeping the product in an int (rather than letting it widen) documents
    // that it must fit in four bytes.
    int elementsInFilter_times_elementsInChannelOfOutputTensor =
        elementsInFilter_child * elementsInChannelOfOutputTensor_host_child;

    cudaMalloc(&col, elementsInFilter_times_elementsInChannelOfOutputTensor * sizeof(float));


    // -----------------------------------------------------------------------------
    // Define parameters for calculating the matrix product of filterTensor and col.
    // -----------------------------------------------------------------------------
    // Declaring cublasHandle requires '#include "cublas_v2.h"'.
    // Defining cublasHandle requires adding "cublas.lib" to
    // Properties -> Linker -> Input -> Additional Dependencies.
    cublasHandle_t cublasHandle;
    cublasCreate(&cublasHandle);

    // alpha = 1, beta = 0: do not accumulate into the output matrix.
    float one = 1.0;
    float zero = 0.0;


    // ------------------------------------------------------------
    // For each image in subdivision,
    // sculpt image into matrix col.
    // Calculate the matrix product of filterTensor and col and
    // store the product as a subtensor of convolutionOutputTensor.
    // ------------------------------------------------------------
    // Kept as an int to prevent automatic widening of the product.
    int image_times_elementsInOutputSubtensor;

    // NOTE(review): the per-iteration cudaMemcpy of the image index could be
    // avoided by passing the index to the kernel by value.
    int* image_GPU;
    cudaMalloc(&image_GPU, sizeof(int));
    for (int image_host = 0; image_host < imagesInSubdivision_child; ++image_host) {
        cudaMemcpy(image_GPU, &image_host, sizeof(int), cudaMemcpyHostToDevice);

        im2col<<<blocks_for_im2col, threads_per_block_for_im2col>>>
            (col,
                channelsInFilter,
                widthOfFilter,
                heightOfOutputTensor,
                widthOfOutputTensor,
                elementsInChannelOfOutputTensor_GPU_child,
                channelsInFilter_times_elementsInChannelOfOutputTensor,
                elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor,
                inputTensor_child,
                horizontalFilterStride,
                channelsInImage,
                verticalFilterStride,
                elementsInCrossSectionOfImage,
                image_GPU,
                elementsInImage);
        // NOTE(review): ordering before the Sgemm below is presumably already
        // guaranteed on the default stream; this sync mainly surfaces kernel
        // errors at this point.
        cudaDeviceSynchronize();

        // Kept as an int to prevent automatic widening of the product.
        image_times_elementsInOutputSubtensor = image_host * elementsInOutputSubtensor_child;

        // Column-major GEMM: C (m x n) = col (m x k) * filterTensor (k x n),
        // with m = output positions, n = filters, k = filter elements.
        cublasSgemm(
            cublasHandle,
            CUBLAS_OP_N,
            CUBLAS_OP_N,
            elementsInChannelOfOutputTensor_host_child,
            filters_child,
            elementsInFilter_child,
            &one,
            col,
            elementsInChannelOfOutputTensor_host_child,
            filterTensor,
            elementsInFilter_child,
            &zero,
            convolutionOutputTensor_child + image_times_elementsInOutputSubtensor,
            elementsInChannelOfOutputTensor_host_child);
    }

    cudaFree(col);
    cudaFree(image_GPU);
    // Release the cuBLAS handle; the original code leaked it.
    cublasDestroy(cublasHandle);
}


// im2col kernel: one thread per filter-footprint coordinate
// (channel, filter column, filter row). The thread walks every output
// position and copies the corresponding input element into col_child.
// NOTE(review): there is no index guard, so the launch must supply exactly
// gridDim.x * blockDim.x == (filter elements) threads.
__global__
void im2col(
    float* col_child,
    int* channelsInFilter_child,
    int* widthOfFilter_child,
    int* heightOfOutputTensor_child,
    int* widthOfOutputTensor_child,
    int* elementsInChannelOfOutputTensor_child,
    int* channelsInFilter_times_elementsInChannelOfOutputTensor_child,
    int* elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor_child,
    float* inputTensor_child_child,
    int* horizontalFilterStride_child,
    int* channelsInImage_child,
    int* verticalFilterStride_child,
    int* elementsInCrossSectionOfImage_child,
    int* image,
    int* elementsInImage_child)
{
    // Read each scalar parameter once; all of them are loop-invariant.
    const int channelsInFilter = *channelsInFilter_child;
    const int widthOfFilter = *widthOfFilter_child;
    const int heightOfOutputTensor = *heightOfOutputTensor_child;
    const int widthOfOutputTensor = *widthOfOutputTensor_child;
    const int elementsInChannelOfOutputTensor = *elementsInChannelOfOutputTensor_child;
    const int channelsTimesOutputChannel =
        *channelsInFilter_times_elementsInChannelOfOutputTensor_child;
    const int crossSectionTimesOutputChannel =
        *elementsInCrossSectionOfFilter_times_elementsInChannelOfOutputTensor_child;
    const int horizontalFilterStride = *horizontalFilterStride_child;
    const int channelsInImage = *channelsInImage_child;
    const int verticalFilterStride = *verticalFilterStride_child;
    const int elementsInCrossSectionOfImage = *elementsInCrossSectionOfImage_child;
    const int imageOffset = (*image) * (*elementsInImage_child);

    // Decompose the flat thread index into filter coordinates.
    const int flatIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int filterChannel = flatIndex % channelsInFilter;
    const int remainder = flatIndex / channelsInFilter;
    const int filterColumn = remainder % widthOfFilter;
    const int filterRow = remainder / widthOfFilter;

    // Offsets that do not depend on the output position.
    const int colBase =
        filterChannel * elementsInChannelOfOutputTensor +
        filterColumn * channelsTimesOutputChannel +
        filterRow * crossSectionTimesOutputChannel;
    const int inputBase = filterChannel + imageOffset;

    for (int outputRow = 0; outputRow < heightOfOutputTensor; ++outputRow) {
        for (int outputColumn = 0; outputColumn < widthOfOutputTensor; ++outputColumn) {
            col_child[colBase + outputRow * widthOfOutputTensor + outputColumn] =
                inputTensor_child_child[
                    inputBase +
                    (outputColumn * horizontalFilterStride + filterColumn) * channelsInImage +
                    (outputRow * verticalFilterStride + filterRow) * elementsInCrossSectionOfImage];
        }
    }
}
4

1 回答 1

0

在阅读了 Robert Crovella 提供给我的 NVIDIA 文章后,我重写了我的解决方案 “CalculateConvolutionOutputTensor__im2col”,使每个块中的线程从连续的全局内存中加载。我使用了更少的索引算术和更少的参数。整个方法的加速为 (1/0.445 秒)/(1/0.774 秒) = 1.7 倍,im2col 内核的加速为 (1/35.27 毫秒)/(1/128.15 毫秒) = 3.6 倍。感谢您向我指出有用的、具体的阅读材料。

im2col 过去需要 128.15 毫秒;现在只需要 32.12 毫秒。Sgemm 现在需要 6.34 毫秒;可能当时差不多。它们的总时间为 38.46 毫秒。该对运行四次,总共 153.84 毫秒。我想知道如何加快 im2col 的速度,并减少“开销”中的 274.16 毫秒。

为了将图像重排成矩阵 col,我启动了 (2*590*19*19) 个块,每块 (3*590/2) 个线程,让每个线程把图像中滤波器形状区域的半个横截面里的一个元素按顺序传输到 col。我相信每个线程从全局内存加载的位置与前一个线程访问的位置物理相邻,并且每个线程写入全局内存的位置与前一个线程写入的位置物理相邻。我确实注意到每个块最后一个 warp(线程束)中有 11 个线程未被使用。

我想我可能会采纳 th31 的建议,并将这个关于优化的帖子移至 Code Review(代码审查)站点。

具有合并全局内存负载和存储的 im2col 的 Nsight Compute 配置文件

在此处输入图像描述

主文件

// Allow use of cudaMalloc.
#include <cuda_runtime.h>

// Allow use of structs in namespace chrono.
#include <ctime>

// Allow construction of a default_random_engine.
#include <random>

// Allow use of CalculateConvolutionOutputTensor__im2col.
#include "CalculateConvolutionOutputTensor__im2col.h"


// Uploads one host int to freshly allocated device memory.
// Returns the device pointer; the caller owns it and must cudaFree it.
static int* UploadScalarToDevice(int value)
{
    int* devicePointer = nullptr;
    cudaMalloc(&devicePointer, sizeof(int));
    cudaMemcpy(devicePointer, &value, sizeof(int), cudaMemcpyHostToDevice);
    return devicePointer;
}

// Driver for the coalesced version: builds a synthetic input tensor and a
// random filter tensor, runs the im2col-based convolution, and prints the
// first 18 elements of the result as a sanity check.
int main()
{
    // --------------------------------------------------------------------------
    // Declare and define parameters of CalculateConvolutionOutputTensor__im2col.
    // --------------------------------------------------------------------------
    // Device buffer that receives the convolution result
    // (6*3 filters, 19x19 output positions, 4 images).
    float* convolutionOutputTensor;
    cudaMalloc(&convolutionOutputTensor, 6 * 3 * 19 * 19 * 4 * sizeof(float));

    // Elements in one filter: 3 channels x 590 x 590.
    int elementsInFilter = 3 * 590 * 590;

    int elementsInChannelOfOutputTensor = 19 * 19;

    int imagesInSubdivision = 4;

    // Synthetic input: 4 images, 3x608x608 each, values cycling over [0, 1).
    int elementsInInputTensor = 3 * 608 * 608 * 4;
    float* inputTensor_host = new float[elementsInInputTensor];
    for (int i = 0; i < elementsInInputTensor; ++i) {
        // Float literal avoids a silent promotion of the division to double.
        inputTensor_host[i] = ((float)(i % 255)) / 255.0f;
    }
    float* inputTensor_GPU;
    cudaMalloc(&inputTensor_GPU, elementsInInputTensor * sizeof(float));
    cudaMemcpy(
        inputTensor_GPU,
        inputTensor_host,
        elementsInInputTensor * sizeof(float),
        cudaMemcpyHostToDevice);
    delete[] inputTensor_host;

    // Scalar kernel parameters, each uploaded to its own device int
    // because the im2col kernel dereferences them on the GPU.
    int* heightOfFilter_GPU = UploadScalarToDevice(590);
    int* channelsInImage_GPU = UploadScalarToDevice(3);
    int* widthOfImage_GPU = UploadScalarToDevice(608);
    int* widthOfOutputTensor_GPU = UploadScalarToDevice(19);
    int* heightOfImage_GPU = UploadScalarToDevice(608);

    int filters = 6 * 3;

    // Random filters: normal(0, 1) scaled by 1/sqrt(elements in the tensor).
    int elementsInFilterTensor = 6 * 3 * 3 * 590 * 590;
    float* filterTensor_host = new float[elementsInFilterTensor];
    std::default_random_engine randomNumberGenerator(time(NULL));
    std::normal_distribution<float> normalDistribution(0.0f, 1.0f);
    for (int i = 0; i < elementsInFilterTensor; ++i) {
        // sqrtf keeps the whole expression in single precision.
        filterTensor_host[i] =
            normalDistribution(randomNumberGenerator) / sqrtf((float)elementsInFilterTensor);
    }
    float* filterTensor_GPU;
    cudaMalloc(&filterTensor_GPU, elementsInFilterTensor * sizeof(float));
    cudaMemcpy(
        filterTensor_GPU,
        filterTensor_host,
        elementsInFilterTensor * sizeof(float),
        cudaMemcpyHostToDevice);
    delete[] filterTensor_host;

    // Elements written per image: filters x 19 x 19.
    int elementsInOutputSubtensor = 6 * 3 * 19 * 19;


    // -------------------------------------------------
    // Execute CalculateConvolutionOutputTensor__im2col.
    // -------------------------------------------------
    CalculateConvolutionOutputTensor__im2col(
        convolutionOutputTensor,
        elementsInFilter,
        elementsInChannelOfOutputTensor,
        imagesInSubdivision,
        inputTensor_GPU,
        heightOfFilter_GPU,
        channelsInImage_GPU,
        widthOfImage_GPU,
        widthOfOutputTensor_GPU,
        heightOfImage_GPU,
        filters,
        filterTensor_GPU,
        elementsInOutputSubtensor);

    cudaFree(inputTensor_GPU);
    cudaFree(heightOfFilter_GPU);
    cudaFree(channelsInImage_GPU);
    cudaFree(widthOfImage_GPU);
    cudaFree(widthOfOutputTensor_GPU);
    cudaFree(heightOfImage_GPU);
    cudaFree(filterTensor_GPU);


    // --------------------------------------------------
    // Make sure that convolutionOutputTensor is correct.
    // --------------------------------------------------
    float* convolutionOutputTensor_test = new float[6 * 3 * 19 * 19 * 4];
    // The blocking memcpy also surfaces any asynchronous kernel error.
    cudaError_t copyStatus = cudaMemcpy(
        convolutionOutputTensor_test,
        convolutionOutputTensor,
        6 * 3 * 19 * 19 * 4 * sizeof(float),
        cudaMemcpyDeviceToHost);
    if (copyStatus != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(copyStatus));
    }
    printf("convolutionOutputTensor_test: {");
    for (int i = 0; i < 18; ++i) {
        printf("%f, ", convolutionOutputTensor_test[i]);
    }
    printf("...}\n");
    delete[] convolutionOutputTensor_test;

    cudaFree(convolutionOutputTensor);


    return 0;
}

CalculateConvolutionOutputTensor__im2col.h

// Host-side entry point (coalesced version), defined in
// CalculateConvolutionOutputTensor__im2col.cu. For each image in the
// subdivision it rearranges the image into the column matrix col on the GPU
// and multiplies filterTensor with col via cuBLAS, writing one subtensor of
// convolutionOutputTensor_child per image. Convention: every int* parameter
// is a device pointer to one scalar that the kernel dereferences; plain-int
// parameters are consumed on the host.
void CalculateConvolutionOutputTensor__im2col(
    float* convolutionOutputTensor_child,   // device: receives the convolution result
    int elementsInFilter_child,             // host: elements in one filter
    int elementsInChannelOfOutputTensor_host_child,  // host: output positions per channel
    int imagesInSubdivision_child,          // host: number of images to process
    float* inputTensor_child,               // device: packed input images
    int* heightOfFilter,                    // device scalar
    int* channelsInImage,                   // device scalar
    int* widthOfImage,                      // device scalar
    int* widthOfOutputTensor,               // device scalar
    int* heightOfImage,                     // device scalar
    int filters_child,                      // host: number of filters
    float* filterTensor,                    // device: packed filter weights
    int elementsInOutputSubtensor_child);   // host: output elements per image

CalculateConvolutionOutputTensor__im2col.cu

// Allow use of __global__.
#include <cuda_runtime.h>

// Allow declaration of cublasHandle.
#include "cublas_v2.h"

// Allow use of blockIdx.x, blockDim.x, and threadIdx.x.
#include <device_launch_parameters.h>


// Forward declaration of the coalesced im2col kernel (defined at the bottom
// of this file). Each thread copies exactly one input element into col_child;
// consecutive threads in a block read and write consecutive addresses, so
// global loads and stores coalesce. All scalar parameters arrive as device
// pointers to single ints and are dereferenced inside the kernel.
__global__
void im2col(
    float* col_child,               // device: destination column matrix
    float* inputTensor_child_child, // device: packed input images
    int* heightOfFilter_child,      // device scalar
    int* channelsInImage_child,     // device scalar
    int* widthOfImage_child,        // device scalar
    int* widthOfOutputTensor_child, // device scalar
    int* image,                     // device scalar: index of current image
    int* heightOfImage_child);      // device scalar


// Computes the convolution output for every image in the subdivision
// (coalesced version). Per image: (1) the im2col kernel rearranges the image
// into the column matrix col with coalesced loads and stores, then
// (2) cublasSgemm multiplies filterTensor by col and stores the product in
// that image's subtensor of convolutionOutputTensor_child.
// Every int* parameter is a device pointer to one scalar consumed by the
// kernel; plain-int parameters are host-side sizes.
void CalculateConvolutionOutputTensor__im2col(
    float* convolutionOutputTensor_child,
    int elementsInFilter_child,
    int elementsInChannelOfOutputTensor_host_child,
    int imagesInSubdivision_child,
    float* inputTensor_child,
    int* heightOfFilter,
    int* channelsInImage,
    int* widthOfImage,
    int* widthOfOutputTensor,
    int* heightOfImage,
    int filters_child,
    float* filterTensor,
    int elementsInOutputSubtensor_child)
{
    // -----------------------------------------
    // Define and declare parameters for im2col.
    // -----------------------------------------
    // Each block transfers one half cross-section of the filter-shaped
    // portion of the image; each thread moves exactly one element.
    // NOTE(review): 885 threads is not a multiple of the warp size (32),
    // leaving part of the last warp in every block idle.
    int threads_per_block_for_im2col = 3 * 590 / 2;
    int blocks_for_im2col = 2 * 590 * 19 * 19;

    // Declare col, the im2col scratch buffer; cublasSgemm below treats it as
    // an (elementsInFilter x elementsInChannelOfOutputTensor) column-major matrix.
    float* col;

    // Keeping the product in an int (rather than letting it widen) documents
    // that it must fit in four bytes.
    int elementsInFilter_times_elementsInChannelOfOutputTensor =
        elementsInFilter_child * elementsInChannelOfOutputTensor_host_child;

    cudaMalloc(&col, elementsInFilter_times_elementsInChannelOfOutputTensor * sizeof(float));


    // -----------------------------------------------------------------------------
    // Define parameters for calculating the matrix product of filterTensor and col.
    // -----------------------------------------------------------------------------
    // Declaring cublasHandle requires '#include "cublas_v2.h"'.
    // Defining cublasHandle requires adding "cublas.lib" to
    // Properties -> Linker -> Input -> Additional Dependencies.
    cublasHandle_t cublasHandle;
    cublasCreate(&cublasHandle);

    // alpha = 1, beta = 0: do not accumulate into the output matrix.
    float one = 1.0;
    float zero = 0.0;


    // ------------------------------------------------------------
    // For each image in subdivision,
    // sculpt image into matrix col.
    // Calculate the matrix product of filterTensor and col and
    // store the product as a subtensor of convolutionOutputTensor.
    // ------------------------------------------------------------
    // Kept as an int to prevent automatic widening of the product.
    int image_times_elementsInOutputSubtensor;

    // NOTE(review): the per-iteration cudaMemcpy of the image index could be
    // avoided by passing the index to the kernel by value.
    int* image_GPU;
    cudaMalloc(&image_GPU, sizeof(int));
    for (int image_host = 0; image_host < imagesInSubdivision_child; ++image_host) {
        cudaMemcpy(image_GPU, &image_host, sizeof(int), cudaMemcpyHostToDevice);

        im2col
            <<<blocks_for_im2col,
               threads_per_block_for_im2col>>>
            (col,
             inputTensor_child,
             heightOfFilter,
             channelsInImage,
             widthOfImage,
             widthOfOutputTensor,
             image_GPU,
             heightOfImage);
        // NOTE(review): ordering before the Sgemm below is presumably already
        // guaranteed on the default stream; this sync mainly surfaces kernel
        // errors at this point.
        cudaDeviceSynchronize();

        // Kept as an int to prevent automatic widening of the product.
        image_times_elementsInOutputSubtensor = image_host * elementsInOutputSubtensor_child;

        // Column-major GEMM: C (m x n) = filterTensor (m x k) * col (k x n),
        // with m = filters, n = output positions, k = filter elements.
        cublasSgemm(
            cublasHandle,
            CUBLAS_OP_N,
            CUBLAS_OP_N,
            filters_child,
            elementsInChannelOfOutputTensor_host_child,
            elementsInFilter_child,
            &one,
            filterTensor,
            filters_child,
            col,
            elementsInFilter_child,
            &zero,
            convolutionOutputTensor_child + image_times_elementsInOutputSubtensor,
            filters_child);
    }

    cudaFree(col);
    cudaFree(image_GPU);
    // Release the cuBLAS handle; the original code leaked it. Also removed
    // the dead local "float element = 0.0;" that was never used.
    cublasDestroy(cublasHandle);
}


// Coalesced im2col kernel: thread (blockIdx.x, threadIdx.x) copies exactly
// one element of the input tensor into the matching slot of col_child.
// Consecutive threads read and write consecutive addresses, so both the
// global load and the global store coalesce.
__global__
void im2col(
    float* col_child,
    float* inputTensor_child_child,
    int* heightOfFilter_child,
    int* channelsInImage_child,
    int* widthOfImage_child,
    int* widthOfOutputTensor_child,
    int* image,
    int* heightOfImage_child)
{
    // Read each scalar parameter once into registers.
    const int heightOfFilter = *heightOfFilter_child;
    const int channelsInImage = *channelsInImage_child;
    const int widthOfImage = *widthOfImage_child;
    const int widthOfOutputTensor = *widthOfOutputTensor_child;
    const int heightOfImage = *heightOfImage_child;
    const int imageIndex = *image;

    // Derived strides; each image row holds channels * width elements.
    const int elementsInCrossSectionOfImage = channelsInImage * widthOfImage;
    const int halfCrossSectionsPerFilter = 2 * heightOfFilter;

    // Destination: col is filled linearly, one block-sized chunk per block.
    const int destinationIndex = blockIdx.x * blockDim.x + threadIdx.x;

    // Source: decompose blockIdx.x into (half-cross-section parity,
    // filter row, output column, output row) and add the image offset.
    const int sourceIndex =
        threadIdx.x +
        (blockIdx.x % 2) * blockDim.x +
        ((blockIdx.x % halfCrossSectionsPerFilter) / 2) * elementsInCrossSectionOfImage +
        (blockIdx.x / halfCrossSectionsPerFilter) * channelsInImage +
        (blockIdx.x / (halfCrossSectionsPerFilter * widthOfOutputTensor)) * elementsInCrossSectionOfImage +
        imageIndex * elementsInCrossSectionOfImage * heightOfImage;

    col_child[destinationIndex] = inputTensor_child_child[sourceIndex];
}
于 2020-05-12T01:28:02.540 回答