
I am working on my thesis project, trying to investigate whether, and when, the use of OpenCL pipes can also be useful on CPUs (we already know they are widely used on FPGAs).

I am trying to implement the simplest possible algorithm: one producer kernel that writes to a pipe and one consumer kernel that reads from it. I want the two kernels to run in parallel and the pipe to have blocking behaviour (i.e. read only when the pipe is not empty). Reading the Intel documentation for Intel_FPGA_Opencl, it explains that the __attribute__((blocking)) attribute declared on a pipe should make read operations block while the pipe is empty. However, when I try to use that attribute I get the error __write_pipe_2_bl is undefined. I even tried to emulate the blocking behaviour with a while loop (as shown in the Intel documentation), but the kernel stalls even when the pipe is not empty.
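
The blocking variant I tried looked roughly like this (a sketch following the pipe declaration syntax in the Intel FPGA documentation; the kernel name is just illustrative). This is the declaration that makes the CPU runtime report __write_pipe_2_bl is undefined:

__kernel void pipe_writer_blocking(__global int *in,
                                   write_only pipe int __attribute__((blocking))
                                                       __attribute__((depth(SIZE))) p)
{
    for(int i = 0; i < SIZE; i++){
        // With the blocking attribute this call is supposed to stall
        // until the pipe has room, instead of returning a failure code.
        write_pipe(p, &in[i]);
    }
}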

Moreover, the kernels do not seem to run in parallel unless I use two different command queues.

Kernel code:

#pragma OPENCL EXTENSION cl_intel_printf : enable
#define SIZE 1000

__kernel void pipe_writer(__global int *in,
                          write_only pipe int __attribute__((depth(SIZE))) p)
{
    for(int i = 0; i < SIZE; i++){
        write_pipe(p, &in[i]);
        printf("written: %d\n", in[i]);
    }

}

__kernel void pipe_reader(__global int *out,
                          read_only pipe int __attribute__((depth(SIZE))) p)
{
    for(int i = 0; i < SIZE; i++){
        while (read_pipe(p, &out[i]) == -1){
        //printf("blocked read\n");
        }
        //int check = read_pipe(p, &out[i]);
        printf("read: %d\n", out[i]);
    }
}

Host code:

#include <stdio.h>
#include <stdlib.h>

#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif

#define MAX_SOURCE_SIZE (0x100000)

int main(void) {
    // Create the input vector A and the output vector B
    int bb = 0;
    int i;
    const int LIST_SIZE = 1000;
    int *A = (int*)malloc(sizeof(int)*LIST_SIZE);
    int *B = (int*)malloc(sizeof(int)*LIST_SIZE);
    for(i = 0; i < LIST_SIZE; i++) {
        A[i] = i;
    }

    // Load the kernel source code into the array source_str
    FILE *fp;
    char *source_str;
    size_t source_size;

    fp = fopen("kernel.cl", "r");
    if (!fp) {
        fprintf(stderr, "Failed to load kernel.\n");
        exit(1);
    }
    source_str = (char*)malloc(MAX_SOURCE_SIZE);
    source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
    fclose( fp );

    // Get platform and device information
    cl_platform_id platform_id = NULL;
    cl_device_id device_id = NULL;
    cl_uint ret_num_devices;
    cl_uint ret_num_platforms;
    cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
    if(ret != CL_SUCCESS){
        printf("getPlatformId, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1,
            &device_id, &ret_num_devices);
    if(ret != CL_SUCCESS){
        printf("getDevice, ERROR CODE: %d\n", ret);
        bb=1;
    }

    // Create an OpenCL context
    cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
    if(ret != CL_SUCCESS){
        printf("createContext, ERROR CODE: %d\n", ret);
        bb=1;
    }
    // Create a command queue
    cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
    cl_command_queue command_queue2 = clCreateCommandQueue(context, device_id, 0, &ret);

    if(ret != CL_SUCCESS){
        printf("commandQueue, ERROR CODE: %d\n", ret);
        bb=1;
    }

    // Create memory buffers on the device for each vector 
    cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
            LIST_SIZE * sizeof(int), NULL, &ret);
    cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
            LIST_SIZE * sizeof(int), NULL, &ret);

    if(ret != CL_SUCCESS){
        printf("memobj, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0,
            LIST_SIZE * sizeof(int), A, 0, NULL, NULL);

    if(ret != CL_SUCCESS){
        printf("enqueuewritebuffer, ERROR CODE: %d\n", ret);
        bb=1;
    }

    cl_program program = clCreateProgramWithSource(context, 1,
            (const char **)&source_str, (const size_t *)&source_size, &ret);

    if(ret != CL_SUCCESS){
        printf("crateProgWithSource, ERROR CODE: %d\n", ret);
        bb=1;
    }

    // Build the program
    ret = clBuildProgram(program, 1, &device_id, "-cl-std=CL2.0", NULL, NULL);

    if(ret != CL_SUCCESS){
        printf("buildProgram, ERROR CODE: %d\n", ret);
        bb=1;
    }

    /////Debug Kernel compilation:
    size_t ret_val_size;
    clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
    char * build_log = (char*) malloc(sizeof(char) * (ret_val_size));
    clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
    printf("LOG: \n%s\n", build_log);
    ///////////////////////////////

    cl_kernel pipe_writer = clCreateKernel(program, "pipe_writer", &ret);
    if(ret != CL_SUCCESS){
        printf("createkernelwriter, ERROR CODE: %d\n", ret);
        bb=1;
    }

    cl_kernel pipe_reader = clCreateKernel(program, "pipe_reader", &ret);
    if(ret != CL_SUCCESS){
        printf("createkernelReader, ERROR CODE: %d\n", ret);
        bb=1;
    }

    cl_mem pipe = clCreatePipe(context, 0, sizeof(cl_int), 1000, NULL, &ret);
    if(ret != CL_SUCCESS){
        printf("createPipe, ERROR CODE: %d\n", ret);
        bb=1;
    }

    // Set the arguments of the kernel
    ret = clSetKernelArg(pipe_writer, 0, sizeof(cl_mem), (void *)&a_mem_obj);
    if(ret != CL_SUCCESS){
        printf("setArgWriterZERO, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clSetKernelArg(pipe_writer, 1, sizeof(cl_mem), &pipe);
    if(ret != CL_SUCCESS){
        printf("setArgWriterONE, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clSetKernelArg(pipe_reader, 0, sizeof(cl_mem), (void *)&b_mem_obj);
    if(ret != CL_SUCCESS){
        printf("setArgReaderZERO, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clSetKernelArg(pipe_reader, 1, sizeof(cl_mem), &pipe);
    if(ret != CL_SUCCESS){
        printf("setArgReaderONE, ERROR CODE: %d\n", ret);
        bb=1;
    }

    // Launch each kernel as a single-work-item task
    size_t global_item_size = 1; // unused: clEnqueueTask launches a single work-item
    size_t local_item_size = 1;  // unused

    cl_event sync; //??

    ret = clEnqueueTask(command_queue, pipe_writer, 0, NULL, NULL);
    if(ret != CL_SUCCESS){
        printf("EnqueueKernelWriter, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clEnqueueTask(command_queue2, pipe_reader, 0, NULL, NULL);
    if(ret != CL_SUCCESS){
        printf("EnqueueKernelReader, ERROR CODE: %d\n", ret);
        bb=1;
    }

    ret = clEnqueueReadBuffer(command_queue2, b_mem_obj, CL_TRUE, 0,
            LIST_SIZE * sizeof(int), B, 0, NULL, NULL);
    if(ret != CL_SUCCESS){
       printf("EnqueueReadBuffer, ERROR CODE: %d\n", ret);
       bb=1;
    }

    if(bb == 0){
    // Display the result to the screen
    for(i = 0; i < LIST_SIZE; i++)
        printf("%d and %d\n", A[i], B[i]);
    }

    // Clean up (both queues and the pipe object need to be finished/released)
    ret = clFlush(command_queue);
    ret = clFinish(command_queue);
    ret = clFlush(command_queue2);
    ret = clFinish(command_queue2);
    ret = clReleaseKernel(pipe_writer);
    ret = clReleaseKernel(pipe_reader);
    ret = clReleaseProgram(program);
    ret = clReleaseMemObject(a_mem_obj);
    ret = clReleaseMemObject(b_mem_obj);
    ret = clReleaseMemObject(pipe);
    ret = clReleaseCommandQueue(command_queue);
    ret = clReleaseCommandQueue(command_queue2);
    ret = clReleaseContext(context);
    free(A);
    free(B);
    return 0;
}

This is what clinfo reports for the CPU I am running on:

Platform Name                                   Intel(R) CPU Runtime for OpenCL(TM) Applications
Number of devices                                 1
  Device Name                                     Intel(R) Xeon(R) CPU E5-2698 v4 @ 2.20GHz
  Device Vendor                                   Intel(R) Corporation
  Device Vendor ID                                0x8086
  Device Version                                  OpenCL 2.1 (Build 0)
  Driver Version                                  18.1.0.0920
  Device OpenCL C Version                         OpenCL C 2.0 
  Device Type                                     CPU
  Device Profile                                  FULL_PROFILE
  Max compute units                               80
  Max clock frequency                             2200MHz
  Device Partition                                (core)
    Max number of sub-devices                     80
    Supported partition types                     by counts, equally, by names (Intel)
  Max work item dimensions                        3
  Max work item sizes                             8192x8192x8192
  Max work group size                             8192
  Preferred work group size multiple              128
  Max sub-groups per work group                   1
  Preferred / native vector sizes                 
    char                                                 1 / 32      
    short                                                1 / 16      
    int                                                  1 / 8       
    long                                                 1 / 4       
    half                                                 0 / 0        (n/a)
    float                                                1 / 8       
    double                                               1 / 4        (cl_khr_fp64)
  Half-precision Floating-point support           (n/a)
  Single-precision Floating-point support         (core)
    Denormals                                     Yes
    Infinity and NANs                             Yes
    Round to nearest                              Yes
    Round to zero                                 No
    Round to infinity                             No
    IEEE754-2008 fused multiply-add               No
    Support is emulated in software               No
    Correctly-rounded divide and sqrt operations  No
  Double-precision Floating-point support         (cl_khr_fp64)
    Denormals                                     Yes
    Infinity and NANs                             Yes
    Round to nearest                              Yes
    Round to zero                                 Yes
    Round to infinity                             Yes
    IEEE754-2008 fused multiply-add               Yes
    Support is emulated in software               No
    Correctly-rounded divide and sqrt operations  No
  Address bits                                    64, Little-Endian
  Global memory size                              540956721152 (503.8GiB)
  Error Correction support                        No
  Max memory allocation                           135239180288 (126GiB)
  Unified memory for Host and Device              Yes
  Shared Virtual Memory (SVM) capabilities        (core)
    Coarse-grained buffer sharing                 Yes
    Fine-grained buffer sharing                   Yes
    Fine-grained system sharing                   Yes
    Atomics                                       Yes
  Minimum alignment for any data type             128 bytes
  Alignment of base address                       1024 bits (128 bytes)
  Preferred alignment for atomics                 
    SVM                                           64 bytes
    Global                                        64 bytes
    Local                                         0 bytes
  Max size for global variable                    65536 (64KiB)
  Preferred total size of global vars             65536 (64KiB)
  Global Memory cache type                        Read/Write
  Global Memory cache size                        262144
  Global Memory cache line                        64 bytes
  Image support                                   Yes
    Max number of samplers per kernel             480
    Max size for 1D images from buffer            8452448768 pixels
    Max 1D or 2D image array size                 2048 images
    Base address alignment for 2D image buffers   64 bytes
    Pitch alignment for 2D image buffers          64 bytes
    Max 2D image size                             16384x16384 pixels
    Max 3D image size                             2048x2048x2048 pixels
    Max number of read image args                 480
    Max number of write image args                480
    Max number of read/write image args           480
  Max number of pipe args                         16
  Max active pipe reservations                    3276
  Max pipe packet size                            1024
  Local memory type                               Global
  Local memory size                               32768 (32KiB)
  Max constant buffer size                        131072 (128KiB)
  Max number of constant args                     480
  Max size of kernel argument                     3840 (3.75KiB)
  Queue properties (on host)                      
    Out-of-order execution                        Yes
    Profiling                                     Yes
    Local thread execution (Intel)                Yes
  Queue properties (on device)                    
    Out-of-order execution                        Yes
    Profiling                                     Yes
    Preferred size                                4294967295 (4GiB)
    Max size                                      4294967295 (4GiB)
  Max queues on device                            4294967295
  Max events on device                            4294967295
  Prefer user sync for interop                    No
  Profiling timer resolution                      1ns
  Execution capabilities                          
    Run OpenCL kernels                            Yes
    Run native kernels                            Yes
    Sub-group independent forward progress        No
    IL version                                    SPIR-V_1.0
    SPIR versions                                 1.2
  printf() buffer size                            1048576 (1024KiB)
  Built-in kernels                                
  Device Available                                Yes
  Compiler Available                              Yes
  Linker Available                                Yes
  Device Extensions                               cl_khr_icd cl_khr_global_int32_base_atomics cl_khr_global_int32_extended_atomics cl_khr_local_int32_base_atomics cl_khr_local_int32_extended_atomics cl_khr_byte_addressable_store cl_khr_depth_images cl_khr_3d_image_writes cl_intel_exec_by_local_thread cl_khr_spir cl_khr_fp64 cl_khr_image2d_from_buffer cl_intel_vec_len_hint 

1 Answer


OpenCL 2.0 pipes as used for Intel FPGAs are quite different from standard OpenCL 2.0 pipes on a CPU.

One important difference is that standard OpenCL 2.0 pipes are not meant to establish communication between concurrently executing kernels. Pipes are a subclass of memory objects, and their state is enforced only at synchronization points (see s3.3.1 Memory Consistency of the OpenCL 1.2 specification), where a synchronization point is a command-queue barrier or a wait on an event (see s3.4.3 Synchronization). In other words, according to the OpenCL specification, the data written to a pipe is only guaranteed to become visible once the writing kernel has finished executing.
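
As a minimal sketch of what the standard does guarantee (reusing the kernel and variable names from the host code above), the portable pattern is to let the producer run to completion before the consumer starts, using an event or a queue barrier as the synchronization point; this of course serializes the two kernels instead of streaming between them:

cl_event writer_done;

// Producer runs first; the data it wrote to the pipe is only guaranteed
// to be visible to other commands after this synchronization point.
ret = clEnqueueTask(command_queue, pipe_writer, 0, NULL, &writer_done);

// Consumer starts only after the producer has finished.
ret = clEnqueueTask(command_queue, pipe_reader, 1, &writer_done, NULL);

// Blocking read on the same in-order queue, so both kernels are done.
ret = clEnqueueReadBuffer(command_queue, b_mem_obj, CL_TRUE, 0,
        LIST_SIZE * sizeof(int), B, 0, NULL, NULL);
clReleaseEvent(writer_done);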

Intel OpenCL for FPGA provides additional features (extensions) that make OpenCL 2.0 pipes usable on FPGAs: specifically, it guarantees that kernels can communicate through pipes, and it adds several extensions that make such communication simpler and more efficient (blocking pipes, host pipes, pipe depth). The Intel OpenCL CPU runtime does not support all of these features.

However, for the purposes of your thesis project you can take a look at the Fast Emulator in the Intel FPGA SDK: it is essentially a CPU runtime that supports the FPGA extensions, including pipes (with kernel-to-kernel communication) and host pipes. See s8.7, Using the Fast Emulator (Preview), in https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/hb/opencl-sdk/aocl_programming_guide.pdf.

Moreover, the kernels do not seem to run in parallel unless I use two different command queues.

If you do not create a command queue with CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, the queue is in-order, which means there is an implicit dependency between the commands pushed into it, so they cannot run in parallel.
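
For example (a sketch reusing the context, device and kernel variables from your host code), a single out-of-order queue lets the two tasks be scheduled concurrently, with events expressing whatever ordering you still need; note that, as explained above, concurrent scheduling alone still does not give you the pipe semantics you want on the CPU runtime:

cl_queue_properties props[] = {
    CL_QUEUE_PROPERTIES, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, 0
};
cl_command_queue ooo_queue =
    clCreateCommandQueueWithProperties(context, device_id, props, &ret);

cl_event writer_ev, reader_ev;
ret = clEnqueueTask(ooo_queue, pipe_writer, 0, NULL, &writer_ev);
ret = clEnqueueTask(ooo_queue, pipe_reader, 0, NULL, &reader_ev);

// The queue no longer enforces ordering, so the read-back must wait
// explicitly for the reader kernel via its event.
ret = clEnqueueReadBuffer(ooo_queue, b_mem_obj, CL_TRUE, 0,
        LIST_SIZE * sizeof(int), B, 1, &reader_ev, NULL);
clReleaseEvent(writer_ev);
clReleaseEvent(reader_ev);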

Also, you should call clFlush(command_queue) before clEnqueueReadBuffer(command_queue2, ...) to make sure the writer kernel is started before you make the blocking call for the reader.
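
With your current two in-order queues that would look roughly like this:

// Ensure the writer command has actually been submitted before we block
// on the reader's queue; otherwise the reader may wait forever for data.
clFlush(command_queue);

ret = clEnqueueReadBuffer(command_queue2, b_mem_obj, CL_TRUE, 0,
        LIST_SIZE * sizeof(int), B, 0, NULL, NULL);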

Answered 2019-02-04T21:12:50.127