
Edit: the improvements achieved over time are listed at the end of this question (~1 Tflops/s).

I have written a kind of math library for C# using OpenCL (GPU) in a C++ DLL, and have done some optimizations on single-precision square matrix-matrix multiplication (for learning purposes and for possible reuse later in a neural network program). The kernel code below takes the v1 1D array as the rows of matrix-1 (1024x1024), the v2 1D array as the columns of matrix-2 (1024x1024, transpose optimization), and puts the result into the v3 1D array as the rows of matrix-3 (1024x1024).
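
For reference, this is the memory layout the kernel below assumes, written as the kind of naive 3-nested-loop C reference I compare against (a sketch only; the v1/v2/v3 names match the kernel arguments, everything else is illustrative):

// Naive reference: C = A * B, with B already stored transposed.
// v1[row*1024 + k]   = A[row][k]       (rows of matrix-1)
// v2[col*1024 + k]   = (B^T)[col][k]   (columns of matrix-2, stored as rows)
// v3[row*1024 + col] = C[row][col]     (rows of matrix-3)
void naiveSquareMatrixMul(const float *v1, const float *v2, float *v3)
{
    const int n = 1024;
    for (int row = 0; row < n; row++)
        for (int col = 0; col < n; col++)
        {
            float sum = 0.0f;
            for (int k = 0; k < n; k++)
                sum += v1[row * n + k] * v2[col * n + k];
            v3[row * n + col] = sum;
        }
}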

Currently, the kernel execution time for a 1024x1024 square matrix-matrix multiplication on the HD7870 is 3.6 milliseconds.

Optimizations done so far:

  • Transposing the second matrix. (improved the time)
  • Computing with 32x32 sub-matrices in local memory (as 4 tiles of 16x16, because the maximum work-group size on my HD7870 is 256, and for some reason the GPU does not accept more than 24 kB of local memory even though online sources say 64 kB?). (greatly improved the time either way; see the footprint check after this list)
  • Increasing data reuse through private variables before writing results to local and global memory. (improved the time)
  • Column-major access to the local 2D arrays in the innermost loop. (improved the time)
  • Splitting each patch's summation across two accumulator registers. (improved the time, decreased numerical stability)
  • Unrolling the innermost loop did not improve the time (it even got worse after unrolling by 4), so the integer ALU does not seem to be the bottleneck.
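
For the local-memory point above, the footprint of the 32x32 tiling as written is easy to check; this only restates what the kernel below declares:

#include <stdio.h>

// The kernel declares 12 local arrays of 16x16 floats per work group:
// Lcache1/1a/1b/1c, Lcache2/2a/2b/2c, Lcache3/3a/3b/3c.
int main(void)
{
    const int arrays = 12, tileDim = 16;
    const int ldsBytes = arrays * tileDim * tileDim * (int)sizeof(float);
    printf("LDS per work group: %d bytes\n", ldsBytes); // 12288 bytes = 12 kB,
                                                        // below the 24 kB limit seen here
    return 0;
}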

Q: I could not finish some optimizations, such as eliminating all local (LDS) bank conflicts and reordering instructions to hide memory latency. What can I do to improve the performance of this math function?

This kernel is certainly bound by local memory bandwidth (bank conflicts). With 3.6 ms per multiplication:

(1024*1024*1024 multiply-adds * (1 add + 1 mult = 2 flops)) / 0.0036 seconds ≈ 596x10^9 flops per second (596 GFlops). I have seen online CUDA benchmarks on a GTX680 that break the 1 TFlops mark. Is that because it has more local memory per compute unit, more cores, or both?

(1024*1024*1024 * (2 float reads) * (4 bytes per float)) / 0.0036 sec ≈ 2386x10^9 bytes per second, but this kernel reads 8 floats and uses them 16 times, which is a data reuse of 2 per float.

2386x10^9 bytes/s / reuse factor (2) = 1193 GB/s
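
The same arithmetic as a tiny helper, in case the numbers need to be recomputed for other sizes and times (illustrative names, nothing device-specific):

#include <stdio.h>

// Effective throughput of an n x n x n multiplication that took 'seconds':
//   flops = 2 * n^3               (one multiply + one add per inner element)
//   bytes = 2 * n^3 * 4 / reuse   (two float reads per multiply-add, each reused 'reuse' times)
int main(void)
{
    const double n = 1024.0, seconds = 0.0036, reuse = 2.0;
    const double gflops  = 2.0 * n * n * n / seconds / 1e9;
    const double ldsGBps = 2.0 * n * n * n * 4.0 / reuse / seconds / 1e9;
    printf("%.0f GFlops/s, %.0f GB/s of LDS traffic\n", gflops, ldsGBps); // ~596 and ~1193
    return 0;
}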

The theoretical maximums for the HD7870 (from here, Appendix D) are:

Compute power = 2560 Giga floating-point operations per second, LDS bandwidth = 2560 GB/s, register access bandwidth = 15360 GB/s

Here is the kernel:

__kernel void squareGpuMatrixMul(__global float * v1, __global float * v2, __global float * v3) 
{
    int localRow = get_local_id(0); 
    int localCol = get_local_id(1);  
    int selectRowFromA = get_group_id(0)*32;     
    int selectColFromB = get_group_id(1)*32;     
    int lid= localCol*16+localRow; 
    __local float Lcache1[ 16][ 16]; 
    __local float Lcache2[ 16][ 16]; 
    __local float Lcache3[ 16][ 16]; 

    __local float Lcache1a[ 16][ 16]; 
    __local float Lcache2a[ 16][ 16]; 
    __local float Lcache3a[ 16][ 16]; 

    __local float Lcache1b[ 16][ 16]; 
    __local float Lcache2b[ 16][ 16]; 
    __local float Lcache3b[ 16][ 16]; 

    __local float Lcache1c[ 16][ 16]; 
    __local float Lcache2c[ 16][ 16]; 
    __local float Lcache3c[ 16][ 16]; 

    float tmp0=0.0f; 
    float tmp1=0.0f; 
    float tmp2=0.0f; 
    float tmp3=0.0f; 

    float tmp4=0.0f; 
    float tmp5=0.0f; 
    float tmp6=0.0f; 
    float tmp7=0.0f; 

    float sumPatch=0.0f; 
    float sumPatcha=0.0f; 
    float sumPatchb=0.0f; 
    float sumPatchc=0.0f; 
    float sumPatch2=0.0f; 
    float sumPatcha2=0.0f; 
    float sumPatchb2=0.0f; 
    float sumPatchc2=0.0f; 

    barrier(CLK_LOCAL_MEM_FENCE); 
    Lcache3[localRow][localCol]=0.0f; 
    Lcache3a[localRow][localCol]=0.0f; 
    Lcache3b[localRow][localCol]=0.0f; 
    Lcache3c[localRow][localCol]=0.0f; 
    barrier(CLK_LOCAL_MEM_FENCE); 
    for(int i=0;i<1024;i+=32)  // this is A's row and B's column parsed by sub-matrices
    { 
        barrier(CLK_LOCAL_MEM_FENCE); 
        Lcache1[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024];
        Lcache2[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024];
        Lcache1a[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+ 16];
        Lcache2a[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+ 16];
        Lcache1b[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+16384];
        Lcache2b[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+16384];
        Lcache1c[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+ 16+16384];
        Lcache2c[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+ 16+16384];
        barrier(CLK_LOCAL_MEM_FENCE); 
        sumPatch=0.0f; 
        sumPatcha=0.0f; 
        sumPatchb=0.0f; 
        sumPatchc=0.0f; 
        sumPatch2=0.0f; 
        sumPatcha2=0.0f; 
        sumPatchb2=0.0f; 
        sumPatchc2=0.0f; 
        for(int kk=0;kk< 16;kk++) //this is sub-matrix multiplication
        {   
            read_mem_fence(CLK_LOCAL_MEM_FENCE); 
            tmp0=Lcache1[kk][localRow];  // row-major
            tmp1=Lcache1a[kk][localRow]; // accesses
            tmp2=Lcache1b[kk][localRow]; //to local memory
            tmp3=Lcache1c[kk][localRow]; 
            tmp4=Lcache2[kk][localCol]; 
            tmp5=Lcache2a[kk][localCol]; 
            tmp6=Lcache2b[kk][localCol]; 
            tmp7=Lcache2c[kk][localCol]; 
            read_mem_fence(CLK_LOCAL_MEM_FENCE); 
            sumPatch+=tmp0*tmp4; 
            sumPatcha+=tmp0*tmp6; 
            sumPatchb+=tmp2*tmp4; 
            sumPatchc+=tmp2*tmp6; 
            sumPatch2+=tmp1*tmp5; 
            sumPatcha2+=tmp1*tmp7; 
            sumPatchb2+=tmp3*tmp5; 
            sumPatchc2+=tmp3*tmp7; 
        } 
        Lcache3[localRow][localCol]+=sumPatch+sumPatch2; 
        Lcache3a[localRow][localCol]+=sumPatcha+sumPatcha2; 
        Lcache3b[localRow][localCol]+=sumPatchb+sumPatchb2; 
        Lcache3c[localRow][localCol]+=sumPatchc+sumPatchc2; 
    } 
    barrier(CLK_LOCAL_MEM_FENCE); 
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024]=Lcache3[localRow][localCol];                   
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+ 16]=Lcache3a[localRow][localCol];              
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+16384]=Lcache3b[localRow][localCol];     
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+ 16+16384]=Lcache3c[localRow][localCol];     
    barrier(CLK_LOCAL_MEM_FENCE); 
}

Here is what I tried in order to eliminate the bank conflicts, but it increased kernel execution time by about 20%:

for(int kk=0;kk< 16;kk++) 
{   
    int nc=(kk+lid)&15;//different for all local threads
                       //but does not exceed 0-15 range
                       //summation order is not important
                       //0.+1.+...15. or 14.+15.+0.+..13.
                       //gives correct answer
    read_mem_fence(CLK_LOCAL_MEM_FENCE); 
    tmp0=Lcache1[nc][localRow]; 
    tmp1=Lcache1a[nc][localRow]; 
    tmp2=Lcache1b[nc][localRow]; 
    tmp3=Lcache1c[nc][localRow]; 
    tmp4=Lcache2[nc][localCol]; 
    tmp5=Lcache2a[nc][localCol]; 
    tmp6=Lcache2b[nc][localCol]; 
    tmp7=Lcache2c[nc][localCol]; 
    read_mem_fence(CLK_LOCAL_MEM_FENCE);
    sumPatch+=tmp0*tmp4;
    sumPatcha+=tmp0*tmp6;
    sumPatchb+=tmp2*tmp4;
    sumPatchc+=tmp2*tmp6;
    sumPatch2+=tmp1*tmp5;
    sumPatcha2+=tmp1*tmp7;
    sumPatchb2+=tmp3*tmp5;
    sumPatchc2+=tmp3*tmp7;
} 

Could this be the broadcast mechanism of newer GPUs? Also, does summing over 16 elements mean only 16 banks are used? The device has 32 banks for local access.
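
Another commonly suggested way to break conflict patterns, which I have not measured on this kernel, is to pad the local tiles by one column so the row-to-row stride is no longer a power of two. A minimal self-contained sketch of the idea on a single 16x16 tile (not the full kernel above; whether it applies to this access pattern would have to be measured):

// One 16x16 tile of A times one 16x16 tile of B, launched with a 16x16
// work group; the tiles are padded to 16x17 so that accesses that stride
// by a full row spread across different LDS banks.
__kernel void paddedTileMulSketch(__global const float *a,
                                  __global const float *b,
                                  __global float *c)
{
    const int row = get_local_id(0);
    const int col = get_local_id(1);
    __local float tileA[16][17];   // padded second dimension
    __local float tileB[16][17];

    tileA[row][col] = a[row * 16 + col];
    tileB[row][col] = b[row * 16 + col];
    barrier(CLK_LOCAL_MEM_FENCE);

    float sum = 0.0f;
    for (int k = 0; k < 16; k++)
        sum += tileA[row][k] * tileB[k][col];   // tileA[row][k] now strides by 17 floats
                                                // across work items instead of 16
    c[row * 16 + col] = sum;
}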

Here is what I tried in order to hide memory latency:

for(int kk=0;kk< 16;kk++) 
{   
    int nc=(kk+lid)&15;//different for all local threads
                       //but does not exceed 0-15 range
                       //summation order is not important
                       //0.+1.+...15. or 14.+15.+0.+..13.
                       //gives correct answer
    read_mem_fence(CLK_LOCAL_MEM_FENCE); 
    tmp0=Lcache1[nc][localRow]; 
    tmp4=Lcache2[nc][localCol];
    sumPatch+=tmp0*tmp4; 
    tmp6=Lcache2b[nc][localCol];
    sumPatcha+=tmp0*tmp6; 
    tmp1=Lcache1a[nc][localRow];
    tmp7=Lcache2c[nc][localCol]; 
    sumPatcha2+=tmp1*tmp7; 
    tmp5=Lcache2a[nc][localCol];
    sumPatch2+=tmp1*tmp5; 
    tmp2=Lcache1b[nc][localRow]; 
    sumPatchb+=tmp2*tmp4;
    sumPatchc+=tmp2*tmp6; 
    tmp3=Lcache1c[nc][localRow]; 
    sumPatchb2+=tmp3*tmp5;
    sumPatchc2+=tmp3*tmp7;  
    read_mem_fence(CLK_LOCAL_MEM_FENCE);//this line's position does not change the time 
}

But this did not increase or decrease the execution time.
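
A more explicit form of the same latency-hiding idea is to prefetch the next iteration's operands into registers before the current multiply-add is issued (software pipelining). A minimal self-contained sketch on a single 16x16 tile with a single accumulator; the compiler may already be doing this, which would explain why the interleaving above changed nothing:

// One 16x16 tile multiply with the LDS reads for iteration k+1 issued
// before the multiply-add for iteration k, so read latency overlaps the FMA.
__kernel void pipelinedTileMulSketch(__global const float *a,
                                     __global const float *b,
                                     __global float *c)
{
    const int row = get_local_id(0);
    const int col = get_local_id(1);
    __local float tileA[16][16];
    __local float tileB[16][16];

    tileA[row][col] = a[row * 16 + col];
    tileB[row][col] = b[row * 16 + col];
    barrier(CLK_LOCAL_MEM_FENCE);

    float sum  = 0.0f;
    float aCur = tileA[row][0];
    float bCur = tileB[0][col];
    for (int k = 0; k < 16; k++)
    {
        // issue the next loads first, then consume the current pair
        float aNext = (k < 15) ? tileA[row][k + 1] : 0.0f;
        float bNext = (k < 15) ? tileB[k + 1][col] : 0.0f;
        sum += aCur * bCur;
        aCur = aNext;
        bCur = bNext;
    }
    c[row * 16 + col] = sum;
}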

How can I improve the kernel time further? Is it feasible?

Device: HD7870 @ 1000 MHz / 1200 MHz. Host: FX-8150 @ 4 GHz. Header and LIB files from the Khronos website, opencl.dll from the AMD drivers.

Timing is sampled by looping the kernel 100 times and dividing the total time from the Stopwatch start() and stop() methods by 100.0. Only kernel execution is measured, excluding array copies.

All results are compared against a naive 3-nested-loop version with the same random matrix inputs (results agree to within m(ij) +/- delta, where delta is 0.001f).

The kernel here is a simplified version of a more general one (for different matrix and patch sizes).

Kernel parameters for this version: Global = 512,512, Local = 16,16, Reference = 0,0

For an 8320x8320 matrix ---> Global = 4160,4160, Local = 16,16, ref = 0,0, time = 1.87 seconds

Edit: Following DarkZeros's advice, replacing the local Lcache3 arrays with private versions improved the 1024x1024 time to 2.7 ms. That is 795 GFlops per second. This must come from the better occupancy.

Edit 2: Using less local memory opened up the possibility of 48x48 patches (9 x 16x16), which gets a 1056x1056 multiplication down to 2.4 ms ----> 981 GFlops/s. An 8208x8208 multiplication finishes in 961 ms, which is more than 1150 GFlops.
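
For completeness, the Lcache3-to-private change from the first edit looks roughly like this, reduced to one 16x16 result block per work group so the sketch stays self-contained (the measured kernel keeps the full 32x32 tiling with eight accumulators; v2 is stored transposed as above):

__kernel void privateAccumulatorSketch(__global const float *v1,
                                       __global const float *v2,
                                       __global float *v3)
{
    const int localRow = get_local_id(0);
    const int localCol = get_local_id(1);
    const int rowA = get_group_id(0) * 16;
    const int colB = get_group_id(1) * 16;
    __local float tileA[16][16];
    __local float tileB[16][16];

    float acc = 0.0f;   // private accumulator, replaces the Lcache3 round trip through LDS
    for (int i = 0; i < 1024; i += 16)
    {
        barrier(CLK_LOCAL_MEM_FENCE);
        tileA[localCol][localRow] = v1[(rowA + localRow) * 1024 + i + localCol];
        tileB[localRow][localCol] = v2[(colB + localCol) * 1024 + i + localRow];
        barrier(CLK_LOCAL_MEM_FENCE);
        for (int kk = 0; kk < 16; kk++)
            acc += tileA[kk][localRow] * tileB[kk][localCol];
    }
    v3[(rowA + localRow) * 1024 + colB + localCol] = acc;   // written straight from the register
}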


1 Answer


Why so many fences? In fact, I think you don't need them at all. A fence is only needed when a thread's writes to local memory are going to be read by other threads, not when the same thread reads and writes its own local memory.

By the way, fences are much better than barriers. In a barrier you force the threads to synchronize, and in some cases this kills performance.

I think you can rewrite the code with a different memory access model and gain quite a lot of speed.

You can try whether this works better (I made many obvious changes, without even knowing what your code is doing):

__kernel void squareGpuMatrixMul(__global float * v1, __global float * v2, __global float * v3) 
{
    int localRow = get_local_id(0); 
    int localCol = get_local_id(1);  
    int selectRowFromA = get_group_id(0)*32;     
    int selectColFromB = get_group_id(1)*32;     
    int lid= localCol*16+localRow; 
    __local float Lcache1[ 16][ 16]; 
    __local float Lcache2[ 16][ 16]; 
    __local float Lcache3[ 16][ 16]; 

    __local float Lcache1a[ 16][ 16]; 
    __local float Lcache2a[ 16][ 16]; 
    __local float Lcache3a[ 16][ 16]; 

    __local float Lcache1b[ 16][ 16]; 
    __local float Lcache2b[ 16][ 16]; 
    __local float Lcache3b[ 16][ 16]; 

    __local float Lcache1c[ 16][ 16]; 
    __local float Lcache2c[ 16][ 16]; 
    __local float Lcache3c[ 16][ 16]; 

    float tmp0=0.0f; 
    float tmp1=0.0f; 
    float tmp2=0.0f; 
    float tmp3=0.0f; 

    float tmp4=0.0f; 
    float tmp5=0.0f; 
    float tmp6=0.0f; 
    float tmp7=0.0f; 

    float sumPatch=0.0f; 
    float sumPatcha=0.0f; 
    float sumPatchb=0.0f; 
    float sumPatchc=0.0f; 
    float sumPatch2=0.0f; 
    float sumPatcha2=0.0f; 
    float sumPatchb2=0.0f; 
    float sumPatchc2=0.0f; 

    Lcache3[localRow][localCol]=0.0f; 
    Lcache3a[localRow][localCol]=0.0f; 
    Lcache3b[localRow][localCol]=0.0f; 
    Lcache3c[localRow][localCol]=0.0f; 
    for(int i=0;i<1024;i+=32)  // this is A's row and B's column parsed by sub-matrices
    { 
        Lcache1[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024];
        Lcache2[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024];
        Lcache1a[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+ 16];
        Lcache2a[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+ 16];
        Lcache1b[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+16384];
        Lcache2b[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+16384];
        Lcache1c[localCol][localRow]=v1[selectRowFromA*1024+i+localCol+localRow*1024+ 16+16384];
        Lcache2c[localRow][localCol]=v2[selectColFromB*1024+i+localRow+localCol*1024+ 16+16384];
        mem_fence(CLK_LOCAL_MEM_FENCE);  
        sumPatch=0.0f; 
        sumPatcha=0.0f; 
        sumPatchb=0.0f; 
        sumPatchc=0.0f; 
        sumPatch2=0.0f; 
        sumPatcha2=0.0f; 
        sumPatchb2=0.0f; 
        sumPatchc2=0.0f; 
        for(int kk=0;kk< 16;kk++) //this is sub-matrix multiplication
        {   
            tmp0=Lcache1[kk][localRow];  // row-major
            tmp1=Lcache1a[kk][localRow]; // accesses
            tmp2=Lcache1b[kk][localRow]; //to local memory
            tmp3=Lcache1c[kk][localRow]; 
            tmp4=Lcache2[kk][localCol]; 
            tmp5=Lcache2a[kk][localCol]; 
            tmp6=Lcache2b[kk][localCol]; 
            tmp7=Lcache2c[kk][localCol]; 
            sumPatch+=tmp0*tmp4; 
            sumPatcha+=tmp0*tmp6; 
            sumPatchb+=tmp2*tmp4; 
            sumPatchc+=tmp2*tmp6; 
            sumPatch2+=tmp1*tmp5; 
            sumPatcha2+=tmp1*tmp7; 
            sumPatchb2+=tmp3*tmp5; 
            sumPatchc2+=tmp3*tmp7; 
        } 
        Lcache3[localRow][localCol]+=sumPatch+sumPatch2; 
        Lcache3a[localRow][localCol]+=sumPatcha+sumPatcha2; 
        Lcache3b[localRow][localCol]+=sumPatchb+sumPatchb2; 
        Lcache3c[localRow][localCol]+=sumPatchc+sumPatchc2; 
    } 
    mem_fence(CLK_LOCAL_MEM_FENCE); 
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024]=Lcache3[localRow][localCol];                   
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+ 16]=Lcache3a[localRow][localCol];              
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+16384]=Lcache3b[localRow][localCol];     
    v3[selectRowFromA*1024+selectColFromB+localCol+localRow*1024+ 16+16384]=Lcache3c[localRow][localCol];     

}
Answered 2013-08-08T22:47:55.513