几点建议:
- 正如评论中所建议的,将内部 128 元素向量替换为数组以获得更好的内存局部性。
- 这段代码看起来高度可并行化,你试过吗?你可以把各个组合拆分开,在所有可用内核上并行进行过滤,然后重新平衡收集到的工作,再把后续处理拆分到所有内核上。
我实现了一个版本:内部 128 个元素用数组存储,用 PPL 做并行化(需要 VS 2012 或更高版本),并用一些 SSE 代码做过滤,获得了相当显著的加速。根据"进一步处理"具体涉及的内容,稍微调整代码结构可能还会带来好处(例如,在此示例中,我没有在过滤后重新平衡工作)。
更新:我实现了 Ben Voigt 建议的缓存阻塞,并获得了更多的加速。
#include <vector>
#include <array>
#include <random>
#include <limits>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <chrono>
#include <iterator>
#include <ppl.h>
#include <immintrin.h>
using namespace std;
using namespace concurrency;
namespace {
// Problem size: 20000 outer containers, each holding 128 uint16_t values.
constexpr int outerVecSize = 20000;
constexpr int innerVecSize = 128;
// Filter threshold: a pair (a, b) passes only when a[k] + b[k] <= LIMIT for every k.
constexpr int LIMIT = 16000;
// Shared PRNG; each test reseeds it so both versions process identical input data.
auto engine = std::default_random_engine();
} // namespace (original had a stray ';' here)
using InnerVec = std::vector<uint16_t>;
using InnerArr = std::array<uint16_t, innerVecSize>;
// Fills a container with random uint16_t values drawn from a normal
// distribution tuned so roughly 0.1% of element-pair sums stay under LIMIT.
template <typename Cont> void randomFill(Cont& c) {
    // Mean and standard deviation were chosen empirically to hit ~0.1% pass rate.
    static auto dist = normal_distribution<>(LIMIT / 4.0, LIMIT / 4.6);
    for (auto& elem : c) {
        double sample = dist(engine);
        // Clamp into the representable uint16_t range before truncating.
        if (sample < 0.0) sample = 0.0;
        const double hi = numeric_limits<uint16_t>::max();
        if (sample > hi) sample = hi;
        elem = static_cast<uint16_t>(sample);
    }
}
void resizeInner(InnerVec& v) { v.resize(innerVecSize); }
void resizeInner(InnerArr& a) {}
// Builds one inner container (vector or array) filled with random data.
template <typename Inner> Inner generateRandomInner() {
    Inner result{};
    resizeInner(result); // sizes a vector; no-op for std::array
    randomFill(result);
    return result;
}
// Builds the full outer collection: outerVecSize random inner containers.
template <typename Inner> vector<Inner> generateRandomInput() {
    vector<Inner> outer;
    outer.reserve(outerVecSize);
    // Same number of generateRandomInner calls in the same order as before,
    // so the PRNG sequence (and thus the generated data) is unchanged.
    generate_n(back_inserter(outer), outerVecSize, generateRandomInner<Inner>);
    return outer;
}
// Prints filter statistics, the accumulated check value, and elapsed wall time.
// Both test variants must print identical pass counts and specialValue.
void Report(const chrono::high_resolution_clock::duration elapsed, size_t in1Size, size_t in2Size,
            const int passedFilter, const uint32_t specialValue) {
    const auto totalPairs = in1Size * in2Size;
    const auto pct = 100.0 * (double(passedFilter) / double(totalPairs));
    cout << passedFilter << "/" << totalPairs << " (" << pct << "%) passed filter\n";
    cout << specialValue << "\n";
    const auto ms = chrono::duration_cast<chrono::milliseconds>(elapsed).count();
    cout << "Elapsed time = " << ms << "ms" << endl;
}
void TestOriginalVersion() {
cout << __FUNCTION__ << endl;
engine.seed();
const auto v1 = generateRandomInput<InnerVec>();
const auto v2 = generateRandomInput<InnerVec>();
int passedFilter = 0;
uint32_t specialValue = 0;
auto startTime = chrono::high_resolution_clock::now();
for (size_t i1 = 0; i1 < v1.size(); ++i1) { // v1.size() and v2.size() about 20000
for (size_t i2 = 0; i2 < v2.size(); ++i2) {
const vector<uint16_t>& a = v1[i1];
const vector<uint16_t>& b = v2[i2];
bool good = true;
for (std::size_t k = 0; k < 128; ++k) {
if (static_cast<int>(a[k]) + static_cast<int>(b[k])
> LIMIT) { // LIMIT is a const uint16_t: approx 16000
good = false;
break;
}
}
if (!good) continue;
// Further processing involving i1 and i2
++passedFilter;
specialValue += inner_product(begin(a), end(a), begin(b), 0);
}
}
auto endTime = chrono::high_resolution_clock::now();
Report(endTime - startTime, v1.size(), v2.size(), passedFilter, specialValue);
}
// Returns true when the pair (a, b) passes the filter, i.e. a[k] + b[k] <= LIMIT
// for every k. Vectorized with SSE: 8 uint16_t lanes are tested per 128-bit chunk.
bool needsProcessing(const InnerArr& a, const InnerArr& b) {
static_assert(sizeof(a) == sizeof(b) && (sizeof(a) % 16) == 0, "Array size must be multiple of 16 bytes.");
static const __m128i mmLimit = _mm_set1_epi16(LIMIT);
static const __m128i mmLimitPlus1 = _mm_set1_epi16(LIMIT + 1);
static const __m128i mmOnes = _mm_set1_epi16(-1);
auto to_m128i = [](const uint16_t* p) { return reinterpret_cast<const __m128i*>(p); };
// std::equal walks the 128/8 = 16 chunks in order and short-circuits on the
// first failing chunk, mirroring the scalar early-exit. NOTE: the lambda
// parameters deliberately shadow the outer a/b; here they are 8-lane chunks.
return equal(to_m128i(a.data()), to_m128i(a.data() + a.size()), to_m128i(b.data()), [&](const __m128i& a, const __m128i& b) {
// avoid overflow due to signed compare by clamping sum to LIMIT + 1
// _mm_adds_epu16 saturates instead of wrapping; clamping to LIMIT + 1 keeps
// every lane <= 16001, below 0x8000, so the signed 16-bit compare is exact.
const __m128i clampSum = _mm_min_epu16(_mm_adds_epu16(a, b), mmLimitPlus1);
// _mm_test_all_zeros (SSE4.1) yields 1 iff no lane's sum exceeded LIMIT.
return _mm_test_all_zeros(_mm_cmpgt_epi16(clampSum, mmLimit), mmOnes);
});
}
// Optimized version: std::array inner storage (contiguous, cache friendly),
// SSE filtering via needsProcessing, PPL parallelization over v1, and cache
// blocking (Ben Voigt's suggestion): each task walks a 64-row block of v1
// against all of v2 so the block stays resident in cache.
void TestArrayParallelVersion() {
    cout << __FUNCTION__ << endl;
    engine.seed(); // same seed as the other test -> identical input data
    const auto v1 = generateRandomInput<InnerArr>();
    const auto v2 = generateRandomInput<InnerArr>();
    // Per-thread accumulators, combined after the parallel loop; avoids
    // contention on shared counters.
    combinable<int> passedFilterCombinable;
    combinable<uint32_t> specialValueCombinable;
    auto startTime = chrono::high_resolution_clock::now();
    const size_t blockSize = 64;
    // NOTE: all three index arguments must deduce the same template type. The
    // original passed 0u (unsigned int) alongside v1.size() (size_t), which
    // fails deduction on 64-bit builds where size_t != unsigned int.
    parallel_for(size_t(0), v1.size(), blockSize, [&](size_t i) {
        // Block bounds do not depend on b; hoisted out of the v2 loop.
        const auto blockBegin = begin(v1) + i;
        const auto blockEnd = begin(v1) + min(v1.size(), i + blockSize);
        for (const auto& b : v2) {
            for (auto it = blockBegin; it != blockEnd; ++it) {
                const InnerArr& a = *it;
                if (!needsProcessing(a, b))
                    continue;
                // Further processing involving a and b
                ++passedFilterCombinable.local();
                // Accumulate unsigned: the running total can exceed INT_MAX and
                // signed overflow is undefined behavior (see scalar version).
                specialValueCombinable.local() += inner_product(begin(a), end(a), begin(b), 0u);
            }
        }
    });
    auto passedFilter = passedFilterCombinable.combine(plus<int>());
    auto specialValue = specialValueCombinable.combine(plus<uint32_t>());
    auto endTime = chrono::high_resolution_clock::now();
    Report(endTime - startTime, v1.size(), v2.size(), passedFilter, specialValue);
}
// Runs both benchmark variants back to back; each reseeds the shared engine,
// so they process identical input data and must report identical counts.
int main() {
TestOriginalVersion();
TestArrayParallelVersion();
}
在我的 8 核系统上,我看到了相当不错的加速,你的结果会根据你拥有的核心数量等而有所不同。
TestOriginalVersion
441579/400000000 (0.110395%) passed filter
2447300015
Elapsed time = 12525ms
TestArrayParallelVersion
441579/400000000 (0.110395%) passed filter
2447300015
Elapsed time = 657ms