
I am trying to distribute the work of multiplying two NxN matrices across 3 NVIDIA GPUs using 3 OpenMP threads. (The matrix values get large, hence the long long data type.) However, I cannot figure out where to place the #pragma acc parallel loop. I have used some examples from NVIDIA's shared PDFs, but with no luck. I know the innermost loop cannot be parallelized, but I would like each of the three threads to own one GPU and do a portion of the work. Note that the input and output matrices are defined as global variables because I kept running out of stack memory.

I have tried the code below, but the compiler errors I get all point to line 75, which is where the #pragma acc parallel loop is.

[test@server ~]pgcc -acc -mp -ta=tesla:cc60 -Minfo=all -o testGPU matrixMultiplyopenmp.c

PGC-S-0035-Syntax error: Recovery attempted by replacing keyword for by keyword barrier (matrixMultiplyopenmp.c: 75)

PGC-S-0035-Syntax error: Recovery attempted by replacing acc by keyword enum (matrixMultiplyopenmp.c: 76)

PGC-S-0036-Syntax error: Recovery attempted by inserting ';' before keyword for (matrixMultiplyopenmp.c: 77)

PGC/x86-64 Linux 18.10-1: compilation completed with severe errors

The function is:

void multiplyMatrix(long long int matrixA[SIZE][SIZE], long long int matrixB[SIZE][SIZE], long long int matrixProduct[SIZE][SIZE])
{
    // Get Nvidia device type
    acc_init(acc_device_nvidia);

    // Get Number of GPUs in system
    int num_gpus = acc_get_num_devices(acc_device_nvidia);

    //Set the number of OpenMP thread to the number of GPUs
    #pragma omp parallel num_threads(num_gpus)
    {
        //Get thread openMP number and set the GPU device to that number
        int threadNum = omp_get_thread_num();
        acc_set_device_num(threadNum, acc_device_nvidia);

        int row;
        int col;
        int key;

        #pragma omp for
        #pragma acc parallel loop
        for (row = 0; row < SIZE; row++)
            for (col = 0; col < SIZE; col++)
                for (key = 0; key < SIZE; key++)
                    matrixProduct[row][col] = matrixProduct[row][col] + (matrixA[row][key] * matrixB[key][col]);
    }
}

2 Answers


As fisehara points out, you can't have both an OpenMP "for" loop and an OpenACC parallel loop on the same for loop. Instead, you need to manually decompose the work across the OpenMP threads. Example below.

Is there a reason you need multiple GPUs here? A matrix multiply will most likely fit on a single GPU, so there is no need to introduce the extra overhead of host-side parallelization.
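
For illustration only, here is a minimal single-GPU sketch (my own example, not code from the question; it assumes the same SIZE constant and long long matrices, and accumulates into a local scalar so the inner loop is a plain sequential reduction):

// Hypothetical single-GPU version: one OpenACC parallel loop, no OpenMP needed.
#pragma acc parallel loop copyin(matrixA[:SIZE][:SIZE], matrixB[:SIZE][:SIZE]) \
                          copyout(matrixProduct[:SIZE][:SIZE])
for (int row = 0; row < SIZE; row++) {
    #pragma acc loop vector
    for (int col = 0; col < SIZE; col++) {
        long long int sum = 0;   // local accumulator removes the dependence on matrixProduct
        for (int key = 0; key < SIZE; key++)
            sum += matrixA[row][key] * matrixB[key][col];
        matrixProduct[row][col] = sum;
    }
}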

Also, I generally recommend using MPI+OpenACC for multi-GPU programming. Domain decomposition is a natural part of MPI but is not inherent in OpenMP. In addition, MPI gives you a one-to-one relationship between the host process and the accelerator, lets you scale beyond a single node, and lets you take advantage of CUDA-Aware MPI for direct GPU-to-GPU data transfers. For more info, search the web for "MPI OpenACC" and you'll find several tutorials. Class #2 at https://developer.nvidia.com/openacc-advanced-course is a good resource.
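
To illustrate the MPI+OpenACC idea only (hypothetical names, not taken from the course above): each rank owns a block of rows of A and the matching block of the product, keeps a full copy of B, and binds itself to one GPU.

#include <mpi.h>
#include <openacc.h>

// Each rank multiplies its 'nrows' rows of A (flattened, row-major) by the
// full n x n matrix B, writing its block of the product C.
void matmul_block(long long *Ablk, long long *B, long long *Cblk,
                  int nrows, int n)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Bind this rank to a GPU (round-robin if there are more ranks than GPUs).
    int ngpus = acc_get_num_devices(acc_device_nvidia);
    acc_set_device_num(rank % ngpus, acc_device_nvidia);

    #pragma acc parallel loop copyin(Ablk[:nrows*n], B[:n*n]) copyout(Cblk[:nrows*n])
    for (int i = 0; i < nrows; i++) {
        #pragma acc loop vector
        for (int j = 0; j < n; j++) {
            long long sum = 0;
            for (int k = 0; k < n; k++)
                sum += Ablk[i*n + k] * B[k*n + j];
            Cblk[i*n + j] = sum;
        }
    }
}

A full program would MPI_Scatter the rows of A before this call and MPI_Gather the rows of the product afterwards.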

% cat test.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#ifdef _OPENACC
#include <openacc.h>
#endif

#define SIZE 130

void multiplyMatrix(long long int matrixA[SIZE][SIZE], long long int matrixB[SIZE][SIZE], long long int matrixProduct[SIZE][SIZE])
{

#ifdef _OPENACC
    // Get Nvidia device type
    acc_init(acc_device_nvidia);
    // Get Number of GPUs in system
    int num_gpus = acc_get_num_devices(acc_device_nvidia);
#else
    int num_gpus = omp_get_max_threads();
#endif
    if (SIZE<num_gpus) {
        num_gpus=SIZE;
    }
    printf("Num Threads: %d\n",num_gpus);

    //Set the number of OpenMP thread to the number of GPUs
    #pragma omp parallel num_threads(num_gpus)
    {
        //Get thread openMP number and set the GPU device to that number
        int threadNum = omp_get_thread_num();
#ifdef _OPENACC
        acc_set_device_num(threadNum, acc_device_nvidia);
        printf("THID %d using GPU: %d\n",threadNum,threadNum);
#endif
        int row;
        int col;
        int key;
        int start, end;
        int block_size;
        block_size = SIZE/num_gpus;
        start = threadNum*block_size;
        end = start+block_size;
        if (threadNum==(num_gpus-1)) {
           // add the residual to the last thread
           end = SIZE;
        }
        printf("THID: %d, Start: %d End: %d\n",threadNum,start,end-1);

        #pragma acc parallel loop \
          copy(matrixProduct[start:end-start][:SIZE]), \
          copyin(matrixA[start:end-start][:SIZE],matrixB[:SIZE][:SIZE])
        for (row = start; row < end; row++) {
            #pragma acc loop vector
            for (col = 0; col < SIZE; col++) {
                for (key = 0; key < SIZE; key++) {
                    matrixProduct[row][col] = matrixProduct[row][col] + (matrixA[row][key] * matrixB[key][col]);
        }}}
    }
}

int main() {
   long long int matrixA[SIZE][SIZE];
   long long int matrixB[SIZE][SIZE];
   long long int matrixProduct[SIZE][SIZE];
   int i,j;
   for(i=0;i<SIZE;++i) {
     for(j=0;j<SIZE;++j) {
        matrixA[i][j] = (i*SIZE)+j;
        matrixB[i][j] = (j*SIZE)+i;
        matrixProduct[i][j]=0;
     }
   }
   multiplyMatrix(matrixA,matrixB,matrixProduct);
   printf("Result:\n");
   for(i=0;i<SIZE;++i) {
      printf("%d: %ld %ld\n",i,matrixProduct[i][0],matrixProduct[i][SIZE-1]);
   }

}
% pgcc test.c -mp -ta=tesla -Minfo=accel,mp
multiplyMatrix:
     28, Parallel region activated
     49, Generating copyin(matrixB[:130][:])
         Generating copy(matrixProduct[start:end-start][:131])
         Generating copyin(matrixA[start:end-start][:131])
         Generating Tesla code
         52, #pragma acc loop gang /* blockIdx.x */
         54, #pragma acc loop vector(128) /* threadIdx.x */
         55, #pragma acc loop seq
     54, Loop is parallelizable
     55, Complex loop carried dependence of matrixA->,matrixProduct->,matrixB-> prevents parallelization
         Loop carried dependence of matrixProduct-> prevents parallelization
         Loop carried backward dependence of matrixProduct-> prevents vectorization
     59, Parallel region terminated
% a.out
Num Threads: 4
THID 0 using GPU: 0
THID: 0, Start: 0 End: 31
THID 1 using GPU: 1
THID: 1, Start: 32 End: 63
THID 3 using GPU: 3
THID: 3, Start: 96 End: 129
THID 2 using GPU: 2
THID: 2, Start: 64 End: 95
Result:
0: 723905 141340355
1: 1813955 425843405
2: 2904005 710346455
3: 3994055 994849505
...
126: 138070205 35988724655
127: 139160255 36273227705
128: 140250305 36557730755
129: 141340355 36842233805
Answered on 2019-04-22

I ran into a compilation problem with MPI+OpenACC on the restricted shared system I was on, and I could not upgrade the compiler. The solution I ended up using was to break the work up with OMP first and then call an OpenACC function, as follows:

//Main code
#pragma omp parallel num_threads(num_gpus)
    {
        #pragma omp for private(tid)
        for (tid = 0; tid < num_gpus; tid++)
        {
            //Get thread openMP number and set the GPU device to that number
            int threadNum = omp_get_thread_num();
            acc_set_device_num(threadNum, acc_device_nvidia);

            // check with thread is using which GPU
            int gpu_num = acc_get_device_num(acc_device_nvidia);
            printf("Thread # %d is going to use GPU # %d \n", threadNum, gpu_num);

            //distribute the uneven rows; keep the bounds thread-local so
            //they are not shared (and raced on) across OpenMP threads
            int startRow, stopRow;
            if (threadNum < extraRows)
            {
                startRow = threadNum * (rowsPerThread + 1);
                stopRow = startRow + rowsPerThread;
            }
            else
            {
                startRow = threadNum * rowsPerThread + extraRows;
                stopRow = startRow + (rowsPerThread - 1);
            }
            // Debug to check allocation of data to threads
            //printf("Start row is %d, and Stop rows is %d \n", startRow, stopRow);

            GPUmultiplyMatrix(matrixA, matrixB, matrixProduct, startRow, stopRow);
        }
    }
void GPUmultiplyMatrix(long long int matrixA[SIZE][SIZE], long long int matrixB[SIZE][SIZE],
                       long long int matrixProduct[SIZE][SIZE], int startRow, int stopRow)
    {
        int row;
        int col;
        int key;

        #pragma acc parallel loop collapse (2)
        for (row = startRow; row <= stopRow; row++)
            for (col = 0; col < SIZE; col++)
                for (key = 0; key < SIZE; key++)
                    matrixProduct[row][col] = matrixProduct[row][col] + (matrixA[row][key] * matrixB[key][col]);
    }
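
The snippet assumes num_gpus, rowsPerThread, and extraRows were computed beforehand; one way that setup might look (my assumption, the original post does not show it):

// Hypothetical setup for the snippet above: SIZE rows split across num_gpus
// threads, with the first 'extraRows' threads taking one extra row each.
int num_gpus = acc_get_num_devices(acc_device_nvidia);
int rowsPerThread = SIZE / num_gpus;
int extraRows = SIZE % num_gpus;
int tid;
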
Answered on 2020-01-02