I am writing a library for some matrix operations on top of cuBLAS. The first routine I implemented is a matrix multiplication.
Snippet of the library's header class (.h file):
#include "cusolverDn.h" // NOLINT
#include "cuda_runtime.h" // NOLINT
#include "device_launch_parameters.h" // NOLINT
namespace perception_core {
namespace matrix_transform {
class CudaMatrixTransformations {
 public:
  CudaMatrixTransformations();
  ~CudaMatrixTransformations();
  void MatrixMultiplicationDouble(double *A, double *B, double *C,
                                  const int m, const int k, const int n);

 private:
  // Cublas stuff
  cudaError_t cudaStat1;
  cudaError_t cudaStat2;
  cublasHandle_t cublasH;
  cublasStatus_t cublas_status;
};
} // namespace matrix_transform
} // namespace perception_core
#endif // LIB_CUDA_ROUTINES_INCLUDE_MATRIX_TRANSFORMS_H_
Snippet of the class implementation of the multiplication (.cu file):
// This calculates the matrix mult C(m,n) = A(m,k) * B(k,n)
void CudaMatrixTransformations::MatrixMultiplicationDouble(
    double *A, double *B, double *C, int m, int k, const int n) {
  // Calculate size of each array
  size_t s_A = m * k;
  size_t s_B = k * n;
  size_t s_C = m * n;
  // Create the arrays to use in the GPU
  double *d_A = NULL;
  double *d_B = NULL;
  double *d_C = NULL;
  // Allocate memory
  cudaStat1 = cudaMallocManaged(&d_A, s_A * sizeof(double));
  cudaStat2 = cudaMallocManaged(&d_B, s_B * sizeof(double));
  assert(cudaSuccess == cudaStat1);
  assert(cudaSuccess == cudaStat2);
  cudaStat1 = cudaMallocManaged(&d_C, s_C * sizeof(double));
  assert(cudaSuccess == cudaStat1);
  // Copy the data to the device data
  memcpy(d_A, A, s_A * sizeof(double));
  memcpy(d_B, B, s_B * sizeof(double));
  // Set up stuff for using CUDA
  int lda = m;
  int ldb = k;
  int ldc = m;
  const double alf = 1;
  const double bet = 0;
  const double *alpha = &alf;
  const double *beta = &bet;
  cublas_status = cublasCreate(&cublasH);
  assert(cublas_status == CUBLAS_STATUS_SUCCESS);
  // Perform multiplication
  cublas_status = cublasDgemm(cublasH,                   // cuBLAS handle
                              CUBLAS_OP_N, CUBLAS_OP_N,  // no operation on matrices
                              m, n, k,                   // dimensions in the matrices
                              alpha,                     // scalar for multiplication
                              d_A, lda,                  // matrix d_A and its leading dim
                              d_B, ldb,                  // matrix d_B and its leading dim
                              beta,                      // scalar for multiplication
                              d_C, ldc                   // matrix d_C and its leading dim
  );
  cudaStat1 = cudaDeviceSynchronize();
  assert(cublas_status == CUBLAS_STATUS_SUCCESS);
  assert(cudaSuccess == cudaStat1);
  // Destroy the handle
  cublasDestroy(cublasH);
  C = (double*)malloc(s_C * sizeof(double));
  memcpy(C, d_C, s_C * sizeof(double));
  // Make sure to free resources
  if (d_A) cudaFree(d_A);
  if (d_B) cudaFree(d_B);
  if (d_C) cudaFree(d_C);
  return;
}

CudaMatrixTransformations::CudaMatrixTransformations() {
  cublas_status = CUBLAS_STATUS_SUCCESS;
  cudaStat1 = cudaSuccess;
  cudaStat2 = cudaSuccess;
}
I then created a gtest program to test the functionality. I pass a double *result = NULL; as the C argument of MatrixMultiplicationDouble.
Snippet of the gtest program (.cc file):
TEST_F(MatrixTransformsTest, MatrixMultiplication) {
  double loc_q[] = {3, 4, 5, 6, 7, 8};
  double *q = loc_q;
  double loc_w[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  double *w = loc_w;
  double *result = NULL;
  double loc_result[M_ROWS * M_COLS] = {14, 50, 86, 122, 23, 86, 149, 212};
  matrix_result = loc_result;
  size_t m = 4;
  size_t k = 3;
  size_t n = 2;
  perception_core::matrix_transform::CudaMatrixTransformations transforms;
  transforms.MatrixMultiplicationDouble(w, q, result, m, k, n);
  auto rr = std::addressof(result);
  printf("\nC addr: %p\n", rr);
  std::cout << "result:\n";
  print_matrix(result, m, n);
  EXPECT_TRUE(compare<double>(result, matrix_result, m * n));
}
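For reference, print_matrix and compare are small helpers from my test fixture. Their exact bodies are not important to the question, but they are roughly equivalent to this sketch (row-major printing and exact element comparison are assumptions here; the real helpers may differ):

#include <cstddef>
#include <iostream>

// Rough sketch of the fixture helpers (not the exact code from my test).
// Prints an m x n matrix stored contiguously, assuming row-major layout.
void print_matrix(const double *mat, size_t m, size_t n) {
  for (size_t i = 0; i < m; ++i) {
    for (size_t j = 0; j < n; ++j) {
      std::cout << mat[i * n + j] << " ";
    }
    std::cout << "\n";
  }
}

// Element-wise comparison of two arrays of length len.
template <typename T>
bool compare(const T *a, const T *b, size_t len) {
  for (size_t i = 0; i < len; ++i) {
    if (a[i] != b[i]) return false;
  }
  return true;
}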
The cuBLAS routine itself works fine, because I can see the result when I print the matrix inside the .cu file. However, when I try to access result back in my gtest file, I get a segmentation fault. On further inspection I noticed that the address of the result pointer is different inside the .cu file and inside the .cpp file. As an example, I get:
C addr: 0x7ffc5749db08 (inside .cu)
C addr: 0x7ffc5749dba0 (inside .cpp)
I thought that by using unified memory I would be able to access that pointer from either the host or the device. I have not been able to find out why this address changes or how to fix the segfault. Is there something I am missing about using unified memory? Thanks!
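For completeness, the print that produces the address labeled "inside .cu" above is not shown in the implementation snippet; it sits near the end of MatrixMultiplicationDouble and looks roughly like this (reconstructed from memory, the exact line and placement may differ):

// Inside MatrixMultiplicationDouble, after the memcpy into C:
printf("\nC addr: %p\n", (void *)&C);  // address of the C parameter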