
I am trying to print the execution time of some functions on the GPU, but the time on the GPU is always 0. However, when I select CL_DEVICE_TYPE_CPU as below, it works fine.

 errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, &ret_num_devices);

This works fine and shows a non-zero execution time, but if I select CL_DEVICE_TYPE_GPU it always shows 0, regardless of the total number of data points and threads. Note that in both cases (CL_DEVICE_TYPE_CPU and CL_DEVICE_TYPE_GPU) I print the execution time in the same way: that is my host code, and my kernel code is also identical in both cases (that's what OpenCL is about!). Below are the relevant parts of the code:

    // OpenCL code to get platform and device ids
    errcode = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
    errcode = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &ret_num_devices);

    // to create context
    clGPUContext = clCreateContext(NULL, 1, &device_id, NULL, NULL, &errcode);

    // create a command-queue with profiling enabled
    clCommandQue = clCreateCommandQueue(clGPUContext, device_id,
                                        CL_QUEUE_PROFILING_ENABLE, &errcode);

    // setup device memory
    d_instances = clCreateBuffer(clGPUContext, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                 mem_size_i, instances->data, &errcode);
    d_centroids = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size_c, NULL, &errcode);
    d_distance  = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size_d, NULL, &errcode);
    // d_dist_X = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size4, NULL, &errcode);
    // d_dist_Y = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size4, NULL, &errcode);

    // to build the program
    clProgram = clCreateProgramWithSource(clGPUContext, 1, (const char **)&source_str,
                                          (const size_t *)&source_size, &errcode);

    errcode = clBuildProgram(clProgram, 0, NULL, NULL, NULL, NULL);

    if (errcode == CL_BUILD_PROGRAM_FAILURE)
    {
        // Determine the size of the log
        size_t log_size;
        clGetProgramBuildInfo(clProgram, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);

        // Allocate memory for the log
        char *log = (char *) malloc(log_size);

        // Get the log
        clGetProgramBuildInfo(clProgram, device_id, CL_PROGRAM_BUILD_LOG, log_size, log, NULL);

        // Print the log
        printf("%s\n", log);
    }

    clKernel = clCreateKernel(clProgram, "distance_finding", &errcode);

    // Launch OpenCL kernel
    size_t localWorkSize[1], globalWorkSize[1];
    if (num_instances >= 500)
    {
        localWorkSize[0] = 500;
        float block1 = num_instances / localWorkSize[0];
        int block = (int)(ceil(block1));
        globalWorkSize[0] = block * localWorkSize[0];
    }
    else
    {
        localWorkSize[0] = num_instances;
        globalWorkSize[0] = num_instances;
    }

    int iteration = 1;
    while (iteration < MAX_ITERATIONS)
    {
        errcode = clEnqueueWriteBuffer(clCommandQue, d_centroids, CL_TRUE, 0,
                                       mem_size_c, (void *)centroids->data, 0, NULL, NULL);
        errcode = clEnqueueWriteBuffer(clCommandQue, d_distance, CL_TRUE, 0,
                                       mem_size_d, (void *)distance->data, 0, NULL, NULL);

        // set kernel arguments
        errcode = clSetKernelArg(clKernel, 0, sizeof(cl_mem), (void *)&d_instances);
        errcode = clSetKernelArg(clKernel, 1, sizeof(cl_mem), (void *)&d_centroids);
        errcode = clSetKernelArg(clKernel, 2, sizeof(cl_mem), (void *)&d_distance);
        errcode = clSetKernelArg(clKernel, 3, sizeof(unsigned int), (void *)&num_instances);
        errcode = clSetKernelArg(clKernel, 4, sizeof(unsigned int), (void *)&clusters);
        errcode = clSetKernelArg(clKernel, 5, sizeof(unsigned int), (void *)&dimensions);

        errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 1, NULL,
                                         globalWorkSize, localWorkSize, 0, NULL, &myEvent);

        clFinish(clCommandQue); // wait for all events to finish
        clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_START, sizeof(cl_ulong),
                                &startTime, NULL);
        clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong),
                                &endTime, NULL);
        kernelExecTimeNs = endTime - startTime;
        gpu_time += kernelExecTimeNs;

        // Retrieve result from device
        errcode = clEnqueueReadBuffer(clCommandQue, d_distance, CL_TRUE, 0,
                                      mem_size_d, distance->data, 0, NULL, NULL);

Printing the time in milliseconds:

    printf("\n\n Time taken by GPU is %llu ms", gpu_time / 1000000);

If the way I measure the GPU timing is wrong, why does it work on the CPU (by changing to CL_DEVICE_TYPE_CPU)? What is going wrong here?

EDIT:

System information

AMD APP SDK 2.4; AMD ATI FirePro GL 3D with 800 cores

Kernel

    #pragma OPENCL EXTENSION cl_khr_fp64 : enable

    double distance_cal(__local float* cent, float* data, int dimensions)
    {
        float dist1 = 0.00;
        for (int i = 0; i < dimensions; i++)
            dist1 += ((data[i] - cent[i]) * (data[i] - cent[i]));
        double sq_dist = sqrt(dist1);
        return sq_dist;
    }

    void fetch_col(float* data, __constant float* x, int col, int dimension, int len)
    {
        //hari[i]=8;
        for (int i = 0; i < dimension; i++)
        {
            data[i] = x[col];
            col = col + len;
        }
    }

    void fetch_col_cen(__local float* data, __global float* x, int col, int dimension, int len)
    {
        //hari[i]=8;
        for (int i = 0; i < dimension; i++)
        {
            data[i] = x[col];
            col = col + len;
        }
    }

    __kernel void distance_finding(__constant float* data, __global float* cen,
                                   __global float* dist, int inst, int clus, const int dimensions)
    {
        int idx = get_global_id(0);
        float data_col[4];
        fetch_col(data_col, data, idx, dimensions, inst);

        for (int i = 0; i < clus; i++)
        {
            int k = i * inst; // take each dimension value for each cluster data

            __local float cent[4];
            barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE);
            fetch_col_cen(cent, cen, i, dimensions, clus);

            // calculate distance wrt each data point and each centroid
            dist[idx + k] = distance_cal(cent, data_col, dimensions);
        }
    }

3 Answers


clEnqueueNDRangeKernel() is asynchronous when it targets a GPU, so what you are seeing is only the time taken to enqueue the request, not the time taken to execute it.

That said, I may be wrong, but I usually do the timing in C++ host code and place the following call both before taking start_time and after taking end_time:

    clFinish(cmd_queue); 

Just like with your C++ timing code, that would be a good test if you are sure your GPU should not finish in 0 seconds.
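
As a rough illustration, here is a minimal sketch of that host-side wall-clock check, assuming a POSIX host (clock_gettime) and that the question's queue, kernel, and work-size arrays are already set up; the helper name time_kernel_ms is invented for this example.

    #include <time.h>
    #include <CL/cl.h>

    /* Sketch only: wall-clock timing of one kernel launch, bracketed by clFinish()
       so the asynchronous enqueue cannot make the measurement read as zero. */
    static double time_kernel_ms(cl_command_queue q, cl_kernel k,
                                 const size_t *gws, const size_t *lws)
    {
        struct timespec t0, t1;

        clFinish(q);                              /* drain any previously queued work */
        clock_gettime(CLOCK_MONOTONIC, &t0);      /* start host timer                 */

        clEnqueueNDRangeKernel(q, k, 1, NULL, gws, lws, 0, NULL, NULL);

        clFinish(q);                              /* block until the kernel is done   */
        clock_gettime(CLOCK_MONOTONIC, &t1);      /* stop host timer                  */

        return (t1.tv_sec - t0.tv_sec) * 1e3 +
               (t1.tv_nsec - t0.tv_nsec) / 1e6;   /* elapsed time in milliseconds     */
    }

If this prints a clearly non-zero number while the event-based profiling still reads 0, the problem lies in the profiling calls rather than in the kernel itself.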

answered 2012-05-24T13:35:22.707

A simple way to check is to introduce an abnormally long operation into the kernel. If THAT still reports zero while there is a noticeable lag in the actual execution, then you have your answer.
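
For instance, a deliberately slow test kernel might look like the sketch below; this is only an illustration, and the kernel name slow_test and the loop bound are made up, not part of the question's program.

    // Hypothetical test kernel: burns time in a long loop so any working timer
    // must report a clearly non-zero duration. Writing the accumulator to `out`
    // keeps the compiler from removing the loop as dead code.
    __kernel void slow_test(__global float* out)
    {
        int idx = get_global_id(0);
        float acc = 0.0f;
        for (int i = 0; i < 1000000; i++)      // arbitrary large iteration count
            acc += sin((float)i) * 0.000001f;
        out[idx] = acc;
    }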

That said, I believe (the thread in question was about Linux, but it may apply on Windows as well) that you may need to install instrumented drivers before the system will write the performance counters. You can also use the CUDA profiler with nVidia's OpenCL implementation, since it sits on top of CUDA.

answered 2012-05-23T19:40:38.787

Change it to:

    clFinish(clCommandQue); // wait for all events to finish

    // add this after clFinish()
    // Ensure kernel execution is finished
    clWaitForEvents(1, &myEvent);

    ..

    double gpu_time = endTime - startTime;

    ..

    printf("\n\n Time taken by GPU is %0.3f ms", gpu_time / 1000000.0);
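
Pieced together, the profiling path this answer suggests would look roughly like the sketch below. It reuses the question's variables (clCommandQue, clKernel, myEvent, and the work-size arrays) and shows only the timing-related calls; it is an illustration under those assumptions, not a drop-in replacement.

    cl_ulong startTime = 0, endTime = 0;
    cl_event myEvent;

    errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 1, NULL,
                                     globalWorkSize, localWorkSize, 0, NULL, &myEvent);

    clFinish(clCommandQue);            // wait for all queued commands to finish
    clWaitForEvents(1, &myEvent);      // make sure this kernel's event is complete

    // Both timestamps are reported in nanoseconds
    clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &startTime, NULL);
    clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END,   sizeof(cl_ulong), &endTime,   NULL);

    double gpu_time = (double)(endTime - startTime);          // elapsed nanoseconds
    printf("\n\n Time taken by GPU is %0.3f ms", gpu_time / 1000000.0);

Keeping gpu_time as a double avoids the integer division in the original printf, which truncates sub-millisecond durations to 0.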
answered 2014-01-28T13:52:20.437