I am currently designing a short tutorial showcasing various aspects and capabilities of the Thrust template library.
Unfortunately, there seems to be a problem in the code I wrote to demonstrate copy/compute concurrency using CUDA streams.
My code can be found here, in the AsynchronousLaunch directory: https://github.com/gnthibault/Cuda_Thrust_Introduction/tree/master/AsynchronousLaunch
Here is a summary of the code that produces the problem:
//STL
#include <cstdlib>
#include <algorithm>
#include <iostream>
#include <vector>
#include <functional>
//Thrust
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
//Cuda
#include <cuda_runtime.h>
//Local
#include "AsynchronousLaunch.cu.h"
int main( int argc, char* argv[] )
{
    const size_t fullSize = 1024*1024*64;
    const size_t halfSize = fullSize/2;

    //Declare one host std::vector and initialize it with random values
    std::vector<float> hostVector( fullSize );
    std::generate(hostVector.begin(), hostVector.end(), normalRandomFunctor<float>(0.f,1.f) );

    //And two device vectors of half size
    thrust::device_vector<float> deviceVector0( halfSize );
    thrust::device_vector<float> deviceVector1( halfSize );

    //Also declare and initialize two CUDA streams
    cudaStream_t stream0, stream1;
    cudaStreamCreate( &stream0 );
    cudaStreamCreate( &stream1 );

    //Now, we would like to perform an alternating copy/compute scheme
    for( int i = 0; i < 10; i++ )
    {
        //Wait for the end of the copy to host before starting to copy back to device
        cudaStreamSynchronize(stream0);
        //Warning: thrust::copy does not handle asynchronous behaviour for host/device copies; you must use cudaMemcpyAsync for that
        cudaMemcpyAsync(thrust::raw_pointer_cast(deviceVector0.data()), thrust::raw_pointer_cast(hostVector.data()), halfSize*sizeof(float), cudaMemcpyHostToDevice, stream0);
        cudaStreamSynchronize(stream1);
        //The second copy will most likely occur sequentially after the first one
        cudaMemcpyAsync(thrust::raw_pointer_cast(deviceVector1.data()), thrust::raw_pointer_cast(hostVector.data())+halfSize, halfSize*sizeof(float), cudaMemcpyHostToDevice, stream1);

        //Compute on device, here a per-element transform (it could be an inclusive scan instead, for histogram equalization for instance)
        thrust::transform( thrust::cuda::par.on(stream0), deviceVector0.begin(), deviceVector0.end(), deviceVector0.begin(), computeFunctor<float>() );
        thrust::transform( thrust::cuda::par.on(stream1), deviceVector1.begin(), deviceVector1.end(), deviceVector1.begin(), computeFunctor<float>() );

        //Copy back to host
        cudaMemcpyAsync(thrust::raw_pointer_cast(hostVector.data()), thrust::raw_pointer_cast(deviceVector0.data()), halfSize*sizeof(float), cudaMemcpyDeviceToHost, stream0);
        cudaMemcpyAsync(thrust::raw_pointer_cast(hostVector.data())+halfSize, thrust::raw_pointer_cast(deviceVector1.data()), halfSize*sizeof(float), cudaMemcpyDeviceToHost, stream1);
    }

    //Full synchronization before exit
    cudaDeviceSynchronize();
    cudaStreamDestroy( stream0 );
    cudaStreamDestroy( stream1 );

    return EXIT_SUCCESS;
}
Here is the result of one instance of the program, observed through the NVIDIA Visual Profiler:
As you can see, the cudaMemcpyAsync calls (brown) are indeed issued to streams 13 and 14, but the kernels generated by Thrust from thrust::transform are all issued to the default stream (blue in the capture).
By the way, I am using CUDA toolkit version 7.0.28, with a GTX 680 and gcc 4.8.2.
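(The timeline above was captured with the NVIDIA Visual Profiler; as far as I know, running nvprof --print-gpu-trace ./test.exe with the nvprof shipped in the same toolkit prints an equivalent per-launch trace, including the stream each copy and kernel was issued to.)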
I would be grateful if someone could tell me what is wrong with my code.
Thank you in advance.
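Note: one more thing worth pointing out is that hostVector above is a plain std::vector, i.e. pageable host memory, so the cudaMemcpyAsync calls cannot truly overlap with kernel execution anyway; the EDIT below switches to cudaMallocHost for exactly that reason. If one wanted to keep the std::vector instead, a possible sketch (untested, reusing fullSize and the streams from the code above, and assuming only the standard cudaHostRegister/cudaHostUnregister runtime calls) would be to pin its existing storage in place:

//Pin the storage of an existing std::vector so that cudaMemcpyAsync can actually be asynchronous
std::vector<float> hostVector( fullSize );
cudaHostRegister( hostVector.data(), fullSize*sizeof(float), cudaHostRegisterDefault );
//... issue the cudaMemcpyAsync / thrust::transform work on the streams as above ...
cudaDeviceSynchronize();
cudaHostUnregister( hostVector.data() );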
EDIT: here is the code that I now consider to be a solution:
//STL
#include <cstdlib>
#include <algorithm>
#include <iostream>
#include <functional>
#include <vector>
//Thrust
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
//Cuda
#include <cuda_runtime.h>
//Local definitions
template<typename T>
struct computeFunctor
{
    __host__ __device__
    computeFunctor() {}

    __host__ __device__
    T operator()( T in )
    {
        //Naive functor that generates expensive but useless instructions
        T a = cos(in);
        for(int i = 0; i < 350; i++ )
        {
            a+=cos(in);
        }
        return a;
    }
};
int main( int argc, char* argv[] )
{
    const size_t fullSize = 1024*1024*2;
    const size_t nbOfStrip = 4;
    const size_t stripSize = fullSize/nbOfStrip;

    //Allocate pinned host memory in order to use the asynchronous API, and initialize it
    float* hostVector;
    cudaMallocHost(&hostVector,fullSize*sizeof(float));
    std::fill(hostVector, hostVector+fullSize, 1.0f );

    //And one device vector of the same size
    thrust::device_vector<float> deviceVector( fullSize );

    //Also declare and initialize nbOfStrip CUDA streams
    std::vector<cudaStream_t> vStream(nbOfStrip);
    for( auto it = vStream.begin(); it != vStream.end(); it++ )
    {
        cudaStreamCreate( &(*it) );
    }

    //First scheme: in a loop, issue copyToDevice/compute/copyToHost back to back for each stream
    for( int i = 0; i < 5; i++ )
    {
        for( int j=0; j!=nbOfStrip; j++)
        {
            size_t offset = stripSize*j;
            size_t nextOffset = stripSize*(j+1);
            cudaStreamSynchronize(vStream.at(j));
            cudaMemcpyAsync(thrust::raw_pointer_cast(deviceVector.data())+offset, hostVector+offset, stripSize*sizeof(float), cudaMemcpyHostToDevice, vStream.at(j));
            thrust::transform( thrust::cuda::par.on(vStream.at(j)), deviceVector.begin()+offset, deviceVector.begin()+nextOffset, deviceVector.begin()+offset, computeFunctor<float>() );
            cudaMemcpyAsync(hostVector+offset, thrust::raw_pointer_cast(deviceVector.data())+offset, stripSize*sizeof(float), cudaMemcpyDeviceToHost, vStream.at(j));
        }
    }
    //On devices that do not have multiple copy-engine queues, this scheme serializes all commands even though they have been issued to different streams.
    //Why? Because from the point of view of the copy engine, which is a single resource in that case, there is a real dependency between HtoD(n) and DtoH(n), which is fine, but there is also
    //a false dependency between DtoH(n) and HtoD(n+1) that precludes any copy/compute overlap
    //(a small device-property check to see how many copy engines a GPU has is given after the compile command at the end of this post).

    //Full synchronization before testing the second scheme
    cudaDeviceSynchronize();

    //Second scheme: in a loop, issue copyToDevice for every stream, then compute for every stream, then copyToHost for every stream
    for( int i = 0; i < 5; i++ )
    {
        for( int j=0; j!=nbOfStrip; j++)
        {
            cudaStreamSynchronize(vStream.at(j));
        }
        for( int j=0; j!=nbOfStrip; j++)
        {
            size_t offset = stripSize*j;
            cudaMemcpyAsync(thrust::raw_pointer_cast(deviceVector.data())+offset, hostVector+offset, stripSize*sizeof(float), cudaMemcpyHostToDevice, vStream.at(j));
        }
        for( int j=0; j!=nbOfStrip; j++)
        {
            size_t offset = stripSize*j;
            size_t nextOffset = stripSize*(j+1);
            thrust::transform( thrust::cuda::par.on(vStream.at(j)), deviceVector.begin()+offset, deviceVector.begin()+nextOffset, deviceVector.begin()+offset, computeFunctor<float>() );
        }
        for( int j=0; j!=nbOfStrip; j++)
        {
            size_t offset = stripSize*j;
            cudaMemcpyAsync(hostVector+offset, thrust::raw_pointer_cast(deviceVector.data())+offset, stripSize*sizeof(float), cudaMemcpyDeviceToHost, vStream.at(j));
        }
    }
    //On devices that do not have multiple copy-engine queues, this scheme yields better results; on others, it should show nearly identical results.

    //Full synchronization before exit
    cudaDeviceSynchronize();
    for( auto it = vStream.begin(); it != vStream.end(); it++ )
    {
        cudaStreamDestroy( *it );
    }
    cudaFreeHost( hostVector );

    return EXIT_SUCCESS;
}
Compiled with: nvcc ./test.cu -o ./test.exe -std=c++11
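As a side note, related to the comments above about devices with a single copy engine: the number of asynchronous copy engines can be queried at runtime. A minimal sketch (assuming only the standard cudaGetDeviceProperties API and its asyncEngineCount field) would be:

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties( &prop, 0 ); //Properties of device 0
    //asyncEngineCount is 1 on devices with a single copy engine (HtoD and DtoH copies cannot overlap each other),
    //and 2 on devices that can overlap copies in both directions with kernel execution
    std::printf( "Device %s has %d copy engine(s)\n", prop.name, prop.asyncEngineCount );
    return 0;
}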