I am trying to convert the following code to AVX intrinsics to improve performance:
for (int alpha = 0; alpha < 4; alpha++) {
    for (int k = 0; k < 3; k++) {
        for (int beta = 0; beta < 4; beta++) {
            for (int l = 0; l < 4; l++) {
                d2_phi[(alpha*3+k)*16 + beta*4+l] =
                    - (d2_phi[(alpha*3+k)*16 + beta*dim+l]
                       + b[k] * ( lam_12[beta][alpha] * a[l]
                                + lam_22[alpha][beta] * b[l]
                                + lam_23[alpha][beta] * rjk[l] )
                       + rjk[k] * ( lam_13[beta][alpha] * a[l]
                                  + lam_23[beta][alpha] * b[l]
                                  + lam_33[alpha][beta] * rjk[l] )
                      ) / sqrt_gamma;
            }
        }
    }
}
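For context, here are the array shapes the loop bounds imply (my inference; dim is assumed to equal 4), written as a hypothetical set of declarations. The names come from the code; the sizes and the __declspec(align(64)) qualifier (mentioned again below) are assumptions:

/* Shapes implied by the loop bounds above, dim == 4 assumed.
   64-byte alignment satisfies the 32-byte requirement of _mm256_load_pd. */
__declspec(align(64)) double d2_phi[4*3*4*4];  /* flattened [alpha][k][beta][l], 192 doubles */
__declspec(align(64)) double a[4], b[4], rjk[4];
__declspec(align(64)) double lam_12[4][4], lam_13[4][4], lam_22[4][4],
                             lam_23[4][4], lam_33[4][4];
double sqrt_gamma;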
and tried the following approach:
#include <immintrin.h>

// Load sqrt_gamma once, because it is constant
__m256d ymm7 = _mm256_broadcast_sd(&sqrt_gamma);
for (int alpha = 0; alpha < 4; alpha++) {
    for (int k = 0; k < 3; k++) {
        // Load the values that depend only on k
        __m256d ymm9 = _mm256_broadcast_sd(b + k);   // all lanes = b[k]
        __m256d ymm8 = _mm256_broadcast_sd(rjk + k); // all lanes = rjk[k]
        for (int beta = 0; beta < 4; beta++) {
            // Load the lambdas; each broadcast is constant across the (vectorized) l loop
            __m256d ymm15 = _mm256_broadcast_sd(lam_12_p + 4*beta + alpha);  // all lanes = lam_12[beta][alpha]
            __m256d ymm14 = _mm256_broadcast_sd(lam_22_p + 4*alpha + beta);  // all lanes = lam_22[alpha][beta]
            __m256d ymm13 = _mm256_broadcast_sd(lam_23_p + 4*alpha + beta);  // all lanes = lam_23[alpha][beta]
            __m256d ymm12 = _mm256_broadcast_sd(lam_13_p + 4*beta + alpha);  // all lanes = lam_13[beta][alpha]
            __m256d ymm11 = _mm256_broadcast_sd(lam_23_p + 4*beta + alpha);  // all lanes = lam_23[beta][alpha]
            __m256d ymm10 = _mm256_broadcast_sd(lam_33_p + 4*alpha + beta);  // all lanes = lam_33[alpha][beta]
            // Load the values that depend on the innermost loop, which the AVX version removes
            __m256d ymm6 = _mm256_load_pd(a);   // a[0] .. a[3]
            __m256d ymm5 = _mm256_load_pd(b);   // b[0] .. b[3]
            __m256d ymm4 = _mm256_load_pd(rjk); // rjk[0] .. rjk[3]
            // Old values of d2_phi, loaded from the same address the result is stored to
            __m256d ymm3 = _mm256_load_pd(d2_phi_p + (alpha*3+k)*16 + beta*dim);
            // Block that is later multiplied by b[k]
            __m256d ymm2 = _mm256_mul_pd(ymm15, ymm6); // lam_12[beta][alpha] * a[l]
            __m256d ymm1 = _mm256_mul_pd(ymm14, ymm5); // lam_22[alpha][beta] * b[l]
            __m256d ymm0 = _mm256_add_pd(ymm2, ymm1);  // lam_12[beta][alpha]*a[l] + lam_22[alpha][beta]*b[l]
            ymm2 = _mm256_mul_pd(ymm13, ymm4);         // lam_23[alpha][beta] * rjk[l]
            ymm0 = _mm256_add_pd(ymm2, ymm0);          // ... + lam_23[alpha][beta]*rjk[l]
            ymm0 = _mm256_mul_pd(ymm9, ymm0);          // b[k] * (first sum of three)
            // Block that is later multiplied by rjk[k]
            ymm2 = _mm256_mul_pd(ymm12, ymm6);         // lam_13[beta][alpha] * a[l]
            ymm1 = _mm256_mul_pd(ymm11, ymm5);         // lam_23[beta][alpha] * b[l]
            ymm2 = _mm256_add_pd(ymm2, ymm1);          // lam_13[beta][alpha]*a[l] + lam_23[beta][alpha]*b[l]
            ymm1 = _mm256_mul_pd(ymm10, ymm4);         // lam_33[alpha][beta] * rjk[l]
            ymm2 = _mm256_add_pd(ymm2, ymm1);          // ... + lam_33[alpha][beta]*rjk[l]
            ymm2 = _mm256_mul_pd(ymm2, ymm8);          // rjk[k] * (second sum of three)
            ymm0 = _mm256_add_pd(ymm0, ymm2);          // combine the two blocks
            ymm0 = _mm256_add_pd(ymm3, ymm0);          // add the old value of d2_phi
            ymm0 = _mm256_div_pd(ymm0, ymm7);          // divide everything by sqrt_gamma
            ymm0 = _mm256_xor_pd(ymm0, _mm256_set1_pd(-0.0)); // negate, as in the scalar version
            _mm256_store_pd(d2_phi_p + (alpha*3+k)*16 + beta*dim, ymm0);
        }
    }
}
But the performance is poor; it is even slower than the auto-vectorized code the Intel compiler generates. I have already tried the following (see the sketch after this list):
- aligning all data arrays to 64 bytes with __declspec(align(64))
- replacing the final store with a streaming store, _mm256_stream_pd
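For reference, this is the shape of that replacement (a minimal sketch; the store_result wrapper and its use_streaming switch are mine for illustration, only the two intrinsics appear in the code above):

#include <immintrin.h>

// Both variants require dst to be 32-byte aligned. _mm256_stream_pd is a
// non-temporal store, i.e. it bypasses the cache, which only pays off if
// the data is not read back soon afterwards.
static void store_result(double *dst, __m256d v, int use_streaming)
{
    if (use_streaming)
        _mm256_stream_pd(dst, v); // streaming (non-temporal) store
    else
        _mm256_store_pd(dst, v);  // normal aligned store
}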
When I look at the generated assembly, I see that the auto-vectorized code fetches all operands in every iteration (instead of only in the loop level where they belong, as I do). It also contains more arithmetic instructions. One last remark: the final store accounts for half of the total time (I repeat the code snippet 1,000,000 times), and I cannot see a reason for that. (I used Intel VTune Amplifier to inspect the assembly and the time spent.)
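One experiment that might isolate the store cost (my own idea, not something tried above; the function and names below are only for illustration): divide once outside the loops and multiply by the reciprocal inside, so the long-latency vdivpd no longer feeds the store directly. If the store's apparent cost then drops, the profiler was attributing the divide's latency to the dependent store.

#include <immintrin.h>

// Sketch: multiply by a precomputed reciprocal instead of dividing.
// Note that x * (1.0/y) is not bit-identical to x / y in general.
static __m256d scale_by_inv_sqrt_gamma(__m256d sum, double sqrt_gamma)
{
    const double inv_sqrt_gamma = 1.0 / sqrt_gamma; // hoisted out of the loops in practice
    __m256d vinv = _mm256_broadcast_sd(&inv_sqrt_gamma);
    return _mm256_mul_pd(sum, vinv);                // replaces _mm256_div_pd(sum, ymm7)
}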
Thanks in advance for any help!