我有以下代码:
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <numeric>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
// Draw one pseudo-random sample, uniformly distributed in [0, 1],
// from the C library rand() generator.
float random_float(void)
{
    float raw = static_cast<float>(rand());
    return raw / static_cast<float>(RAND_MAX);
}
// CPU reference SAXPY: returns a vector r with r[i] = alpha*v1[i] + v2[i].
// The loop runs over the smaller of the two input lengths, so mismatched
// inputs can no longer cause an out-of-bounds read (the original comment
// promised a size check but never performed one). For equal-length inputs
// the behavior is unchanged.
std::vector<float> add(float alpha, std::vector<float>& v1, std::vector<float>& v2 )
{
    const std::size_t n = std::min(v1.size(), v2.size());
    std::vector<float> result(n);
    for (std::size_t i = 0; i < n; ++i)
    {
        result[i] = alpha * v1[i] + v2[i];
    }
    return result;
}
// SAXPY kernel: y[i] = alpha*x[i] + y[i] for 0 <= i < N.
// Grid-stride loop: thread i0 = blockIdx.x*blockDim.x + threadIdx.x handles
// elements i0, i0+stride, i0+2*stride, ... where stride is the total thread
// count (blockDim.x * gridDim.x). Any <<<blocks, threads>>> configuration
// therefore covers all N elements — including a tiny <<<1,3>>> debug launch.
// x is read-only; __restrict__ tells the compiler x and y do not alias.
__global__ void Addloop( int N, float alpha, const float* __restrict__ x, float* __restrict__ y ) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        y[i] = alpha * x[i] + y[i];
}
// Abort with a readable message when a CUDA runtime call fails. Kernel
// launches do not return a status directly, so the launch itself is checked
// below via cudaGetLastError() + cudaDeviceSynchronize().
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        std::cerr << "CUDA error (" << what << "): " << cudaGetErrorString(err) << "\n";
        std::exit(EXIT_FAILURE);
    }
}

// Host driver: fills two random vectors, computes y = alpha*x + y on the
// CPU (reference) and on the GPU with a deliberately tiny <<<1,3>>> launch,
// then prints both results for visual comparison.
int main( int argc, char** argv ) {
    const float alpha = 0.3f;     // 'f' suffix keeps the literal single precision
    const int num_elements = 10;  // kept small for inspection; was 1<<18 (256k)

    // generate random input on the host (rand() is deliberately left
    // unseeded, so the printed sequence is reproducible across runs)
    std::vector<float> h1_input(num_elements);
    std::vector<float> h2_input(num_elements);
    for (int i = 0; i < num_elements; ++i)
    {
        h1_input[i] = random_float();
        h2_input[i] = random_float();
    }
    for (std::vector<float>::iterator it = h1_input.begin(); it != h1_input.end(); ++it)
        std::cout << ' ' << *it;
    std::cout << '\n';
    for (std::vector<float>::iterator it = h2_input.begin(); it != h2_input.end(); ++it)
        std::cout << ' ' << *it;
    std::cout << '\n';

    // CPU reference result
    std::vector<float> host_result = add(alpha, h1_input, h2_input);
    for (std::vector<float>::iterator it = host_result.begin(); it != host_result.end(); ++it)
        std::cout << ' ' << *it;
    std::cout << '\n';

    // move input to device memory — every runtime call is status-checked
    const size_t bytes = sizeof(float) * num_elements;
    float* d1_input = 0;
    float* d2_input = 0;
    checkCuda(cudaMalloc((void**)&d1_input, bytes), "cudaMalloc d1_input");
    checkCuda(cudaMemcpy(d1_input, &h1_input[0], bytes, cudaMemcpyHostToDevice), "H2D d1_input");
    checkCuda(cudaMalloc((void**)&d2_input, bytes), "cudaMalloc d2_input");
    checkCuda(cudaMemcpy(d2_input, &h2_input[0], bytes, cudaMemcpyHostToDevice), "H2D d2_input");

    // tiny launch: 1 block of 3 threads; Addloop's grid-stride loop still
    // covers all num_elements elements
    Addloop<<<1, 3>>>(num_elements, alpha, d1_input, d2_input);
    checkCuda(cudaGetLastError(), "Addloop launch");          // bad-config errors
    checkCuda(cudaDeviceSynchronize(), "Addloop execution");  // async in-kernel errors

    // copy the result back to the host
    std::vector<float> device_result(num_elements);
    checkCuda(cudaMemcpy(&device_result[0], d2_input, bytes, cudaMemcpyDeviceToHost), "D2H result");
    for (std::vector<float>::iterator it = device_result.begin(); it != device_result.end(); ++it)
        std::cout << ' ' << *it;
    std::cout << '\n';

    checkCuda(cudaFree(d1_input), "cudaFree d1_input");
    checkCuda(cudaFree(d2_input), "cudaFree d2_input");
    // the host vectors free themselves on scope exit; explicit clear() removed

    std::cout << "DONE! \n";
    getchar();  // keep the console window open on Windows
    return 0;
}
我正在尝试了解 gpu 内存访问。为简单起见,内核以Addloop<<<1,3>>>的形式启动。我试图通过将在 gpu 上工作的 for 循环想象为实例来理解这段代码是如何工作的。更具体地说,我想象以下实例,但它们没有帮助。
实例 1:
for( i = 0; i < N; i += 3*1 ) // ( i += 0*1 --> i += 3*1 after Eric's comment)
y[i] = alpha*x[i] + y[i];
实例 2:
for( i = 1; i < N; i += 3*1 )
y[i] = alpha*x[i] + y[i];
实例 3:
for( i = 3; i < N; i += 3*1 )
y[i] = alpha*x[i] + y[i];
查看每个循环的内部,添加两个向量的逻辑没有任何意义。有人可以帮忙吗?
我采用这种实例逻辑的原因是因为它在注释中的内核内部代码的情况下运行良好。
如果这些想法是正确的,那么当网格中有多个块时,会是什么情况?换句话说,在这类示例中,每个线程的 i 初始值和更新步长(i += 更新步长)会是多少?
PS:内核代码是从这里借来的。
更新:
在 Eric 的回答之后,我认为当元素数量 N = 15 时,执行过程是这样的(如果我错了,请纠正我):
对于上面的实例 1,i = 0, 3, 6, 9, 12,它计算相应的 y[i] 值。
对于上面的实例 2,i = 1, 4, 7, 10, 13,它计算相应的剩余 y[i] 值。
对于上面的实例 3,i = 2, 5, 8, 11, 14,它计算剩余的 y[i] 值。