
Edit: it was my card's fault... the local-memory kernel is actually several times faster. Sorry!

I am writing a simple sgemm (square matrices, alpha=1, beta=0) that should take advantage of local memory, but it runs at half the speed of the naive version.

Here are the kernels:

    const char* matrixMultiplySource = 
    "__kernel\n"
    "   void matrixMultiply(__global float* A, __global float* B, __global float* C)\n"
    "   {\n"
    "   int i = get_local_id(0);\n"
    "   int j = get_local_id(1);\n"
    "   int ig = get_global_id(0);\n"
    "   int jg = get_global_id(1);\n"
    "   int sizeG0 = get_global_size(0);\n"
    "   __local float localA[BLOCK_SIZE][BLOCK_SIZE];\n"
    "   __local float localB[BLOCK_SIZE][BLOCK_SIZE];\n"
    "   float val=0.0f;\n"
    "   for ( int index = 0; index < sizeG0; index += BLOCK_SIZE )\n"
    "   {\n"
    "       localA[j][i] = A[ig + sizeG0 * (index+j)];\n"
    "       localB[j][i] = B[index+i + sizeG0 * jg];\n"
    "       barrier(CLK_GLOBAL_MEM_FENCE);\n"
    "       #pragma unroll\n"
    "       for ( int kk = 0; kk < BLOCK_SIZE; ++kk)\n"
    "       {\n" 
    "           val = val + localA[kk][i] * localB[j][kk];\n"
    "       }\n"
    "       barrier(CLK_GLOBAL_MEM_FENCE);\n"
    "   }\n"
    "   C[ig + sizeG0 * jg] = val;\n"
    "}\n"
    ;

const char* matrixMultiplySource2 = 
    "__kernel\n"
    "   void matrixMultiply(__global float* A, __global float* B, __global float* C)\n"
    "   {\n"
    "   int ig = get_global_id(0);\n"
    "   int jg = get_global_id(1);\n"
    "   int sizeG0 = get_global_size(0);\n"
    "   float val=0;\n"
    "   for ( int k = 0; k < sizeG0; k++)\n"
    "   {\n"
    "       val = val + A[ig + k * sizeG0] * B[k + jg * sizeG0];\n"
    "   }\n"
    "   C[ig + sizeG0 * jg] = val;\n"
    "}\n"
    ;

BLOCK_SIZE is 16, and I am using 1024x1024 matrices as well as a warm-up launch.

    // Create OpenCL context
    context = mycl::myclCreateContext(NULL, ret_num_devices, devices, NULL, NULL, &ret);

    // Create Command Queue
    command_queue = mycl::myclCreateCommandQueue(context, devices[0], 0, &ret);

    // Create Memory Buffers
    memobjA = mycl::myclCreateBuffer(context, CL_MEM_READ_ONLY, widthA * heightA * sizeof(float), NULL, &ret);
    memobjB = mycl::myclCreateBuffer(context, CL_MEM_READ_ONLY, widthB * heightB * sizeof(float), NULL, &ret);
    memobjC = mycl::myclCreateBuffer(context, CL_MEM_READ_WRITE, widthC * heightC * sizeof(float), NULL, &ret);

    // Copy the matrices A and B to their respective memory buffers
    ret = mycl::myclEnqueueWriteBuffer(command_queue, memobjA, CL_TRUE, 0,
            widthA * heightA * sizeof(float), A, 0, NULL, NULL);
    ret = mycl::myclEnqueueWriteBuffer(command_queue, memobjB, CL_TRUE, 0,
            widthB * heightB * sizeof(float), B, 0, NULL, NULL);

    // Create Kernel Program from the source
    program = mycl::myclCreateProgramWithSource(context, 1, (const char **)&matrixMultiplySource,
            NULL, &ret);

    // Build Kernel Program
    ret = mycl::myclBuildProgram(program, ret_num_devices, devices, "-D BLOCK_SIZE=16", NULL, NULL);
    if (ret != CL_SUCCESS) { cout << "PROBLEM! " << ret << endl; return -1; }

    // Create OpenCL Kernel
    kernel = mycl::myclCreateKernel(program, "matrixMultiply", &ret);

    size_t globalThreads[2] = {heightA, widthB};
    size_t localThreads[2]  = {BLOCK_SIZE, BLOCK_SIZE};

    // Set OpenCL Kernel Arguments
    ret = mycl::myclSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&memobjA);
    ret = mycl::myclSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&memobjB);
    ret = mycl::myclSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&memobjC);

    // Time the kernel (the first launch is the warm-up)
    struct timeval timev1, timev2;
    float time_seconds = 0.0f;
    mycl::myclEnqueueNDRangeKernel(command_queue, kernel, 2, NULL, globalThreads, localThreads, 0, 0, NULL);
    mycl::myclFinish(command_queue);
    gettimeofday(&timev1, NULL);

    ret = mycl::myclEnqueueNDRangeKernel(command_queue, kernel, 2, NULL, globalThreads, localThreads, 0, 0, NULL);
    if (ret != CL_SUCCESS) { cout << "fail! " << ret << endl; }
    ret = mycl::myclFinish(command_queue);
    if (ret != CL_SUCCESS) { cout << "fail! " << ret << endl; }

    gettimeofday(&timev2, NULL);
    time_seconds = (timev2.tv_sec - timev1.tv_sec) + 0.000001 * (timev2.tv_usec - timev1.tv_usec);
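Incidentally, the same measurement can be taken with OpenCL events instead of gettimeofday. A minimal sketch using the plain OpenCL C API (not my mycl:: wrappers), assuming the command queue is created with CL_QUEUE_PROFILING_ENABLE instead of 0 and reusing kernel, command_queue, globalThreads and localThreads from above:

    // Event-based timing of the measured (second) launch.
    // Requires: clCreateCommandQueue(..., CL_QUEUE_PROFILING_ENABLE, ...).
    cl_event evt;
    ret = clEnqueueNDRangeKernel(command_queue, kernel, 2, NULL,
                                 globalThreads, localThreads, 0, NULL, &evt);
    clWaitForEvents(1, &evt);

    cl_ulong t_start = 0, t_end = 0;   // device timestamps in nanoseconds
    clGetEventProfilingInfo(evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &t_start, NULL);
    clGetEventProfilingInfo(evt, CL_PROFILING_COMMAND_END,   sizeof(cl_ulong), &t_end,   NULL);
    cout << "kernel time: " << (t_end - t_start) * 1e-9 << " s" << endl;
    clReleaseEvent(evt);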

2 Answers


You use

    barrier(CLK_GLOBAL_MEM_FENCE);

where I would expect to see

    barrier(CLK_LOCAL_MEM_FENCE);

since inside the loop you are writing to local memory. Furthermore, I doubt that the copy into localA helps you: each item there is only accessed once at a time.
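For reference, a minimal sketch of the question's tiled kernel with both barriers switched to CLK_LOCAL_MEM_FENCE, written as plain OpenCL C rather than a C string (BLOCK_SIZE is still assumed to come from -D BLOCK_SIZE=16):

    __kernel void matrixMultiply(__global float* A, __global float* B, __global float* C)
    {
        int i  = get_local_id(0);
        int j  = get_local_id(1);
        int ig = get_global_id(0);
        int jg = get_global_id(1);
        int sizeG0 = get_global_size(0);
        __local float localA[BLOCK_SIZE][BLOCK_SIZE];
        __local float localB[BLOCK_SIZE][BLOCK_SIZE];
        float val = 0.0f;
        for (int index = 0; index < sizeG0; index += BLOCK_SIZE)
        {
            // Each work-item loads one element of the A tile and one of the B tile.
            localA[j][i] = A[ig + sizeG0 * (index + j)];
            localB[j][i] = B[index + i + sizeG0 * jg];
            // Wait until the whole work-group has finished writing the tiles.
            barrier(CLK_LOCAL_MEM_FENCE);
            for (int kk = 0; kk < BLOCK_SIZE; ++kk)
                val += localA[kk][i] * localB[j][kk];
            // Wait until everyone has finished reading the tiles before the
            // next iteration overwrites them.
            barrier(CLK_LOCAL_MEM_FENCE);
        }
        C[ig + sizeG0 * jg] = val;
    }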

Answered 2014-02-07T21:42:00.830

Have you looked at the two kernels in the AMD APP KernelAnalyzer or an equivalent tool? These tools compile the kernel and show its predicted performance characteristics.
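If such a tool is not at hand, a few per-kernel figures can also be queried at run time with clGetKernelWorkGroupInfo. A minimal sketch against the question's kernel and devices[0] (CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE needs OpenCL 1.1 or newer):

    // Query per-kernel resource usage and work-group limits for one device.
    size_t wg_size = 0, pref_multiple = 0;
    cl_ulong local_mem = 0, private_mem = 0;
    clGetKernelWorkGroupInfo(kernel, devices[0], CL_KERNEL_WORK_GROUP_SIZE,
                             sizeof(wg_size), &wg_size, NULL);
    clGetKernelWorkGroupInfo(kernel, devices[0], CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
                             sizeof(pref_multiple), &pref_multiple, NULL);
    clGetKernelWorkGroupInfo(kernel, devices[0], CL_KERNEL_LOCAL_MEM_SIZE,
                             sizeof(local_mem), &local_mem, NULL);
    clGetKernelWorkGroupInfo(kernel, devices[0], CL_KERNEL_PRIVATE_MEM_SIZE,
                             sizeof(private_mem), &private_mem, NULL);
    cout << "max work-group size: " << wg_size
         << ", preferred multiple: " << pref_multiple
         << ", local mem: " << local_mem << " bytes"
         << ", private mem: " << private_mem << " bytes" << endl;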

Answered 2012-07-27T13:37:07.867