I've been using AltiVec to implement basic math operations, as a way of learning SIMD for an upcoming project. As a way of seeing its performance benefit, I also track how long the operations take to execute, but I've run into something odd.
The first thing I did was add two vectors together and subtract two vectors. That works fine. The next thing I did was multiply two vectors together. However, the multiplication is faster than the addition, even though, according to my particular CPU's datasheet, the instructions used for the addition take fewer clock cycles than the ones used for the multiplication.
I have two arrays, each 10MB large, and I run them through these two routines:
void av_AddValues(int32_t* intArrayA, int32_t* intArrayB, int32_t* outputBuffer, int size)
{
int iterations = size / (sizeof(__vector int32_t) / sizeof(int32_t)); // 4 int32_t elements per 16-byte vector
__vector int32_t* tempA = (__vector int32_t *) intArrayA;
__vector int32_t* tempB = (__vector int32_t *) intArrayB;
__vector int32_t* tempOut = (__vector int32_t *) outputBuffer;
for(int i = 0; i < iterations; i++)
{
__vector int32_t sum = vec_add(*tempA, *tempB);
vec_st(sum, 0, tempOut);
tempA++;
tempB++;
tempOut++;
}
}
void av_MultiplyValues(int16_t* intArrayA, int16_t* intArrayB, int32_t* outputBuffer, int size)
{
int iterations = size / (sizeof(__vector int16_t) / sizeof(int16_t)); // 8 int16_t elements per 16-byte vector, producing 8 int32_t results per iteration
__vector int16_t* tempA = (__vector int16_t *) intArrayA;
__vector int16_t* tempB = (__vector int16_t *) intArrayB;
__vector int32_t* tempOut = (__vector int32_t *) outputBuffer;
for(int i = 0; i < iterations; i++)
{
__vector int32_t productEven = vec_mule(*tempA, *tempB); // 32-bit products of the even-indexed element pairs
__vector int32_t productOdd = vec_mulo(*tempA, *tempB); // 32-bit products of the odd-indexed element pairs
__vector int32_t mergedProductHigh = vec_mergeh(productEven, productOdd); // interleave even/odd products back into element order (first four results)
__vector int32_t mergedProductLow = vec_mergel(productEven, productOdd); // interleave even/odd products back into element order (last four results)
vec_st(mergedProductHigh, 0, tempOut);
tempOut++;
vec_st(mergedProductLow, 0, tempOut);
tempA++;
tempB++;
tempOut++;
}
}
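For reference, this is what one pass of av_MultiplyValues is intended to compute, written as a plain scalar loop (just an illustration of the expected result, not code from the project):

void scalar_MultiplyValues(int16_t* intArrayA, int16_t* intArrayB, int32_t* outputBuffer, int size)
{
    // Each pair of 16-bit inputs yields one sign-extended 32-bit product,
    // stored in the original element order.
    for(int i = 0; i < size; i++)
    {
        outputBuffer[i] = (int32_t) intArrayA[i] * (int32_t) intArrayB[i];
    }
}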
On my particular platform, processing av_AddValues takes 81ms and processing av_MultiplyValues takes 48ms. (Times recorded using std::chrono::high_resolution_clock.)
Why does the multiplication take less time to process than the addition?
Given that the __vector types always work with 16 bytes of data, I didn't think adding 32-bit values would be any different from multiplying 16-bit values.
My first thought was that, because adding the numbers together is such a trivial task, the CPU finishes the operation faster than it can fetch the data from memory, whereas with the multiplication that fetch latency is masked by the fact that the CPU is kept busy and never has to wait as long.
Is this a correct assumption?
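For context, here is a rough back-of-the-envelope of how much data each routine has to read and write per run, using the element count from the test code below (my own sketch, assuming both loops are limited mostly by memory traffic rather than by the arithmetic itself):

#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t elements = 10'485'760; // element count used in TestAdd/TestMultiply
    // av_AddValues reads two int32_t arrays and writes one int32_t array
    int64_t addBytes = elements * (3 * sizeof(int32_t));
    // av_MultiplyValues reads two int16_t arrays and writes one int32_t array
    int64_t mulBytes = elements * (2 * sizeof(int16_t) + sizeof(int32_t));
    std::printf("add: %lld MiB, multiply: %lld MiB\n",
                (long long)(addBytes >> 20), (long long)(mulBytes >> 20));
    return 0;
}

This prints 120 MiB for the add path and 80 MiB for the multiply path, but I'm not sure how much weight to put on that, which is partly why I'm asking.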
Full code:
#include <chrono>
#include <random>
#include <limits>
#include <iostream>
#include <cassert>
#include <cstring>
#include <cstdint>
#include <malloc.h>
#include <altivec.h>
#undef vector
void GenerateRandom16bitValues(int16_t* inputABuffer, int16_t* inputBBuffer, int32_t* outputBuffer, int size);
void GenerateRandom32bitValues(int32_t* inputABuffer, int32_t* inputBBuffer, int32_t* outputBuffer, int size);
void TestAdd();
void TestMultiply();
void av_AddValues(int32_t* intArrayA, int32_t* intArrayB, int32_t* outputBuffer, int size);
void av_MultiplyValues(int16_t* intArrayA, int16_t* intArrayB, int32_t* outputBuffer, int size);
int main()
{
TestAdd();
TestMultiply();
}
void GenerateRandom16bitValues(int16_t* inputABuffer, int16_t* inputBBuffer, int32_t* outputBuffer, int size)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(std::numeric_limits<int16_t>::min(), std::numeric_limits<int16_t>::max());
for(int i = 0; i < size; i++)
{
inputABuffer[i] = dis(gen);
inputBBuffer[i] = dis(gen);
outputBuffer[i] = 0;
}
}
void GenerateRandom32bitValues(int32_t* inputABuffer, int32_t* inputBBuffer, int32_t* outputBuffer, int size)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::max());
for(int i = 0; i < size; i++)
{
inputABuffer[i] = dis(gen);
inputBBuffer[i] = dis(gen);
outputBuffer[i] = 0;
}
}
void TestAdd()
{
int size = 10'485'760;
int bytes = size * sizeof(int32_t);
int32_t* inputABuffer = (int32_t*) memalign(64, bytes);
int32_t* inputBBuffer = (int32_t*) memalign(64, bytes);
int32_t* outputBuffer = (int32_t*) memalign(64, bytes);
assert(inputABuffer != nullptr);
assert(inputBBuffer != nullptr);
assert(outputBuffer != nullptr);
GenerateRandom32bitValues(inputABuffer, inputBBuffer, outputBuffer, size);
for(int i = 0; i < 20; i++)
{
auto start = std::chrono::high_resolution_clock::now();
av_AddValues(inputABuffer, inputBBuffer, outputBuffer, size);
auto end = std::chrono::high_resolution_clock::now();
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
for(int k = 0; k < size; k++)
{
assert(outputBuffer[k] == (inputABuffer[k] + inputBBuffer[k]));
}
std::cout << "Vector Sum - " << diff.count() << "ms\n";
memset(outputBuffer, 0, bytes); // clear the full output buffer (byte count, not element count)
}
}
void TestMultiply()
{
int size = 10'485'760;
int16_t* inputABuffer = (int16_t*) memalign(64, size * sizeof(int16_t));
int16_t* inputBBuffer = (int16_t*) memalign(64, size * sizeof(int16_t));
int32_t* outputBuffer = (int32_t*) memalign(64, size * sizeof(int32_t));
assert(inputABuffer != nullptr);
assert(inputBBuffer != nullptr);
assert(outputBuffer != nullptr);
GenerateRandom16bitValues(inputABuffer, inputBBuffer, outputBuffer, size);
for(int i = 0; i < 20; i++)
{
auto start = std::chrono::high_resolution_clock::now();
av_MultiplyValues(inputABuffer, inputBBuffer, outputBuffer, size);
auto end = std::chrono::high_resolution_clock::now();
auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
for(int k = 0; k < size; k++)
{
assert(outputBuffer[k] == (inputABuffer[k] * inputBBuffer[k]));
}
std::cout << "Vector product - " << diff.count() << "ms\n";
memset(outputBuffer, 0, size * sizeof(int32_t)); // clear the full output buffer (byte count, not element count)
}
}
void av_AddValues(int32_t* intArrayA, int32_t* intArrayB, int32_t* outputBuffer, int size)
{
int iterations = size / (sizeof(__vector int32_t) / sizeof(int32_t));
__vector int32_t* tempA = (__vector int32_t *) intArrayA;
__vector int32_t* tempB = (__vector int32_t *) intArrayB;
__vector int32_t* tempOut = (__vector int32_t *) outputBuffer;
for(int i = 0; i < iterations; i++)
{
__vector int32_t sum = vec_add(*tempA, *tempB);
vec_st(sum, 0, tempOut);
tempA++;
tempB++;
tempOut++;
}
}
void av_MultiplyValues(int16_t* intArrayA, int16_t* intArrayB, int32_t* outputBuffer, int size)
{
int iterations = size / (sizeof(__vector int16_t) / sizeof(int16_t));
__vector int16_t* tempA = (__vector int16_t *) intArrayA;
__vector int16_t* tempB = (__vector int16_t *) intArrayB;
__vector int32_t* tempOut = (__vector int32_t *) outputBuffer;
for(int i = 0; i < iterations; i++)
{
__vector int32_t productEven = vec_mule(*tempA, *tempB);
__vector int32_t productOdd = vec_mulo(*tempA, *tempB);
__vector int32_t mergedProductHigh = vec_mergeh(productEven, productOdd);
__vector int32_t mergedProductLow = vec_mergel(productEven, productOdd);
vec_st(mergedProductHigh, 0, tempOut);
tempOut++;
vec_st(mergedProductLow, 0, tempOut);
tempA++;
tempB++;
tempOut++;
}
}
Output of perf stat and perf record:
Adding
Performance counter stats for './alti':
2151.146080 task-clock (msec) # 0.999 CPUs utilized
9 context-switches # 0.004 K/sec
0 cpu-migrations # 0.000 K/sec
30957 page-faults # 0.014 M/sec
3871497132 cycles # 1.800 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1504538891 instructions # 0.39 insns per cycle
234038234 branches # 108.797 M/sec
687912 branch-misses # 0.29% of all branches
270305159 L1-dcache-loads # 125.656 M/sec
79819113 L1-dcache-load-misses # 29.53% of all L1-dcache hits
<not supported> LLC-loads
<not supported> LLC-load-misses
2.152697186 seconds time elapsed
CPU Utilization
76.04% alti alti [.] av_AddValues
Multiply
Performance counter stats for './alti':
1583.016640 task-clock (msec) # 0.999 CPUs utilized
4 context-switches # 0.003 K/sec
0 cpu-migrations # 0.000 K/sec
20717 page-faults # 0.013 M/sec
2849050875 cycles # 1.800 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1520409634 instructions # 0.53 insns per cycle
179185029 branches # 113.192 M/sec
535437 branch-misses # 0.30% of all branches
205341530 L1-dcache-loads # 129.715 M/sec
27124936 L1-dcache-load-misses # 13.21% of all L1-dcache hits
<not supported> LLC-loads
<not supported> LLC-load-misses
1.584145737 seconds time elapsed
CPU Utilization
60.35% alti alti [.] av_MultiplyValues