I am new to C++ and have heard that libraries such as Eigen, Blaze, Fastor and Xtensor, which provide lazy evaluation and SIMD, are fast for vectorized operations.
I measured the time spent in the following functions, which perform some basic numerical operations:
(Fastor)
using namespace Fastor;

template<typename T, size_t num>
T func2(Tensor<T,num> &u) {
    Tensor<T,num> z;
    for (auto k=0; k<100; ++k){
        z = u * u;
        z /= exp(u+u);
        z *= 1.;
        z *= sin(u) * cos(z);
    }
    return z(last);
}
(Xtensor)
#include <xtensor/xfixed.hpp>
#include <xtensor/xmath.hpp>

template<typename T, size_t num>
T func2(xt::xtensor_fixed<T, xt::xshape<num>> &u) {
    xt::xtensor_fixed<T, xt::xshape<num>> z;
    for (auto k=0; k<100; ++k){
        z = u * u;
        z /= xt::exp(u+u);
        z *= 1.;
        z *= xt::sin(u) * xt::cos(z);
    }
    return z(0);
}
Compile flags:
(Fastor)
-std=c++14 -O3 -march=native -funroll-loops -DNDEBUG -mllvm -inline-threshold=10000000 -ffp-contract=fast -mfma -I/Path/to/Fastor -DFASTOR_NO_ALIAS -DFASTOR_DISPATCH_DIV_TO_MUL_EXPR
(Xtensor)
-std=c++14 -O3 -march=native -funroll-loops -DNDEBUG -mllvm -inline-threshold=10000000 -ffp-contract=fast -mfma -I/Path/to/xsimd/include/ -I/Path/to/xtl/include/ -I/Path/to/xtensor/include/ -I/Path/to/xtensor-blas/include/ -DXTENSOR_USE_XSIMD -lblas -llapack -DHAVE_CBLAS=1
Compiler: Apple LLVM version 10.0.0 (clang-1000.11.45.5)
Processor: 2.6 GHz Intel Core i5
For comparison, I also measured a function written in Python and optimized with numba.vectorize:
import numba
import numpy as np

@numba.vectorize(['float64(float64)'], nopython=True)
def func(x):
    for k in range(100):
        z = x * x
        z /= np.exp(x + x)
        z *= 1.0
        z *= np.sin(x) * np.cos(x)
    return z
The results (in µs) show:
---------------------------------------
num     | Fastor | Xtensor | numba
---------------------------------------
100     | 286    | 201     | 13
1000    | 2789   | 1202    | 65
10000   | 29288  | 20468   | 658
100000  | 328033 | 165263  | 3166
---------------------------------------
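The C++ timings were gathered with a minimal std::chrono loop along the lines of the sketch below (illustrative only; the time_usec helper, the repetition count and the warm-up call are placeholders rather than my exact harness):

#include <chrono>

// Illustrative timing sketch (not the exact harness used): call the kernel
// many times and report the average wall-clock time per call in microseconds.
template<typename Func, typename Arg>
double time_usec(Func f, Arg &u, int reps = 1000) {
    volatile auto sink = f(u);   // warm-up; volatile keeps the result alive
    auto t0 = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < reps; ++i) {
        sink = f(u);             // assigning to volatile prevents dead-code elimination
    }
    auto t1 = std::chrono::high_resolution_clock::now();
    return std::chrono::duration<double, std::micro>(t1 - t0).count() / reps;
}
// Example: time_usec(func2<double, 100>, u) for a Tensor<double,100> u.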
Am I doing something wrong? How can Fastor and Xtensor be 50x slower?
How do I make use of expression templates and lazy evaluation via the auto keyword?
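To clarify what I mean by that last question: my understanding is that with auto the computation stays an unevaluated expression until it is assigned to a concrete tensor, roughly as in the sketch below (based on my reading of the Fastor docs; not measured, and cos(u) is used just for illustration):

using namespace Fastor;

template<typename T, size_t num>
T func2_lazy(Tensor<T,num> &u) {
    // 'expr' is an expression template, not a Tensor: nothing is computed here.
    auto expr = u * u / exp(u + u) * sin(u) * cos(u);
    // The whole expression is evaluated in one fused pass on assignment.
    Tensor<T,num> z = expr;
    return z(0);
}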
Thanks for your help!
@Jérôme Richard Thank you for your help!
Interestingly, Fastor and Xtensor cannot skip the redundant for loop. In any case, I made a fairer comparison of the individual numerical operations.
The factor of 2 from SIMD also makes a lot of sense.
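For the exp case, my mental model of what numba.vectorize compiles per element is the plain scalar loop below, in which exp(u) is loop-invariant and the 100 additions can be collapsed by the optimizer, whereas the Fastor/Xtensor versions sweep over the whole tensor in every iteration (my assumption, sketched in C++):

#include <cmath>

// Scalar sketch (assumption): roughly what the numba kernel does for one element.
// Since exp(u) does not change inside the loop, an optimizer can reduce this to
// z += 100.0 * std::exp(u); the array-wise versions re-evaluate exp over the
// full tensor in every iteration.
template<typename T>
T func_exp_scalar(T u) {
    T z = u;
    for (int k = 0; k < 100; ++k) {
        z += std::exp(u);
    }
    return z;
}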
(Fastor)
template<typename T, size_t num>
T func_exp(Tensor<T,num> &u) {
    Tensor<T,num> z=u;
    for (auto k=0; k<100; ++k){
        z += exp( u );
    }
    return z(0);
}

template<typename T, size_t num>
T func_sin(Tensor<T,num> &u) {
    Tensor<T,num> z=u;
    for (auto k=0; k<100; ++k){
        z += sin( u );
    }
    return z(0);
}

template<typename T, size_t num>
T func_cos(Tensor<T,num> &u) {
    Tensor<T,num> z=u;
    for (auto k=0; k<100; ++k){
        z += cos( u );
    }
    return z(0);
}

template<typename T, size_t num>
T func_add(Tensor<T,num> &u) {
    Tensor<T,num> z=u;
    for (auto k=0; k<100; ++k){
        z += u;
    }
    return z(0);
}

template<typename T, size_t num>
T func_mul(Tensor<T,num> &u) {
    Tensor<T,num> z=u;
    for (auto k=0; k<100; ++k){
        z *= u;
    }
    return z(0);
}

template<typename T, size_t num>
T func_div(Tensor<T,num> &u) {
    Tensor<T,num> z=u;
    for (auto k=0; k<100; ++k){
        z /= u;
    }
    return z(0);
}
(Xtensor)
template<typename T, size_t nn>
T func_exp(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z=u;
    for (auto k=0; k<100; ++k){
        z += xt::exp( u );
    }
    return z(0);
}

template<typename T, size_t nn>
T func_sin(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z=u;
    for (auto k=0; k<100; ++k){
        z += xt::sin( u );
    }
    return z(0);
}

template<typename T, size_t nn>
T func_cos(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z=u;
    for (auto k=0; k<100; ++k){
        z += xt::cos( u );
    }
    return z(0);
}

template<typename T, size_t nn>
T func_add(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z=u;
    for (auto k=0; k<100; ++k){
        z += u;
    }
    return z(0);
}

template<typename T, size_t nn>
T func_mul(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z=u;
    for (auto k=0; k<100; ++k){
        z *= u;
    }
    return z(0);
}

template<typename T, size_t nn>
T func_div(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z=u;
    for (auto k=0; k<100; ++k){
        z /= u;
    }
    return z(0);
}
(Numba)
import numba
from math import exp, sin, cos

@numba.vectorize(['float64(float64)'], nopython=True)
def func_exp(u):
    z = u
    for k in range(100):
        z += exp(u)
    return z

@numba.vectorize(['float64(float64)'], nopython=True)
def func_sin(u):
    z = u
    for k in range(100):
        z += sin(u)
    return z

@numba.vectorize(['float64(float64)'], nopython=True)
def func_cos(u):
    z = u
    for k in range(100):
        z += cos(u)
    return z

@numba.vectorize(['float64(float64)'], nopython=True)
def func_add(u):
    z = u
    for k in range(100):
        z += u
    return z

@numba.vectorize(['float64(float64)'], nopython=True)
def func_mul(u):
    z = u
    for k in range(100):
        z *= u
    return z

@numba.vectorize(['float64(float64)'], nopython=True)
def func_div(u):
    z = u
    for k in range(100):
        z /= u
    return z
The results show:
-----------------------------------------------------------------------------
unit [1E-6 sec]     | exp     | sin      | cos      | add   | mul   | div   |
-----------------------------------------------------------------------------
n=100     Fastor    | 135/135 | 162/162  | 111/110  | 0.07  | 0.06  | 0.06  |
          Xtensor   | 38/38   | 65/32    | 34/58    | 0.06  | 0.05  | 0.05  |
          Numba     | 10      | 9        | 9        | 6.2   | 9.6   | 9.6   |
n=1000    Fastor    | 850/858 | 1004/961 | 917/1021 | 20    | 22    | 279   |
          Xtensor   | 501/399 | 522/491  | 486/450  | 43    | 40    | 275   |
          Numba     | 110     | 94       | 92       | 57    | 91    | 91    |
n=10000   Fastor    | 8113    | 10670    | 10094    | 411   | 396   | 2493  |
          Xtensor   | 4160    | 4052     | 3436     | 890   | 922   | 2735  |
          Numba     | 830     | 888      | 1063     | 645   | 1011  | 914   |
n=100000  Fastor    | 84032   | 104808   | 102868   | 6138  | 6039  | 23404 |
          Xtensor   | 46173   | 48203    | 53948    | 18803 | 13851 | 33485 |
          Numba     | 8743    | 8745     | 8958     | 5672  | 9204  | 9149  |
-----------------------------------------------------------------------------
An entry like 135/135 gives the result without/with -ffast-math.
It turns out that:
- Fastor/Xtensor performs very poorly in exp, sin and cos, which is surprising.
- Fastor/Xtensor scales worse than Numba in +=, *= and /=.
Is this the nature of Fastor/Xtensor?
I then modified the expression to:
template<typename T, size_t num>
auto func_exp2(Tensor<T,num> &u) {
    Tensor<T,num> z = u + 100. * exp(u);
    return z;
}

template<typename T, size_t nn>
auto func_exp2(xt::xtensor_fixed<T, xt::xshape<nn>> &u) {
    xt::xtensor_fixed<T, xt::xshape<nn>> z = u + 100.*xt::exp(u);
    return z;
}
@numba.vectorize(['float64(float64)'], nopython=True)
def func_exp2(u):
    z = u + 100 * exp(u)
    return z
which gives:
-------------------------------------------------
unit [1E-6 sec]  | Fastor | Xtensor | Numba |
-------------------------------------------------
n=100            | 0.100  | 0.066   | 1.8   |
n=1000           | 0.073  | 0.057   | 3.6   |
n=10000          | 0.086  | 0.089   | 26.7  |
n=100000         | 0.056  | 0.065   | 275.7 |
-------------------------------------------------
What is going on?
- Why can't Fastor/Xtensor collapse the for loop into 100*exp(u) through lazy evaluation, as I naively expected?
- Why does Fastor/Xtensor get faster as the tensor size increases?