我想用 cublasDgemm() 替换对“cblas_dgemm()”的调用。这是 Shark 机器学习库的原始包装器:
inline void gemm(
CBLAS_ORDER const Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
int M, int N, int K,
double alpha, double const *A, int lda,
double const *B, int ldb,
double beta, double *C, int ldc
){
cblas_dgemm(
Order, TransA, TransB,
M, N, K,
alpha,
A, lda,
B, ldb,
beta,
C, ldc
);
}
这是使用 OpenAcc 编译指示修改后的代码:
inline void gemm(
CBLAS_ORDER const Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
int M, int N, int K,
double alpha, double const *A, int lda,
double const *B, int ldb,
double beta, double *C, int ldc
){
#ifdef _OPENACC
cublasOperation_t OpT_A, OpT_B;
switch (TransA)
{
case CblasNoTrans:
OpT_A = CUBLAS_OP_N;
break;
case CblasTrans:
OpT_A = CUBLAS_OP_T;
break;
case CblasConjTrans:
OpT_A = CUBLAS_OP_C;
break;
default:
OpT_A = CUBLAS_OP_N;
}
switch (TransB)
{
case CblasNoTrans:
OpT_B = CUBLAS_OP_N;
break;
case CblasTrans:
OpT_B = CUBLAS_OP_T;
break;
case CblasConjTrans:
OpT_B = CUBLAS_OP_C;
break;
default:
OpT_B = CUBLAS_OP_N;
}
cublasHandle_t handle;
#pragma acc data copyin(OpT_A, OpT_B, M, N, K, alpha, A[0:M][0:K], lda, B[0:K][0:N], ldb, beta, ldc) copy(C[0:M][0:N])
{
#pragma acc host_data use_device(handle,OpT_A, OpT_B, A, B, C, M, N, K, lda, ldb, ldc, alpha, beta)
{
cublasDgemm(handle,OpT_A,OpT_B,M,N,K,&alpha,A,lda,B,ldb,&beta,C,ldc);
}
}
#else
cblas_dgemm(
Order, TransA, TransB,
M, N, K,
alpha,
A, lda,
B, ldb,
beta,
C, ldc
);
#endif
}
问题是当我用 OpenAcc 标志编译代码时,结果矩阵的元素,即 C,在内核执行之前和之后都是零。我不确定我在这里缺少什么。我很感激任何帮助。