
I am implementing an ultra-fast popcount on the Intel Xeon® Phi®, since it is a performance hotspot of various bioinformatics software.

So far I have implemented five pieces of code:

#include <stdint.h>   /* uint64_t, uint32_t */
#include <stddef.h>   /* size_t */
#if defined(__MIC__)
#include <zmmintrin.h>
__attribute__((aligned(64))) static const uint32_t POPCOUNT_4bit[16] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
__attribute__((aligned(64))) static const uint32_t MASK_4bit[16] = {0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF, 0xF};
inline uint64_t vpu_popcount1(uint64_t* buf, size_t n)  {
    register size_t result = 0;
    size_t i;
    register const __m512i popcnt = _mm512_load_epi32((void*)POPCOUNT_4bit);
    register const __m512i mask = _mm512_load_epi32((void*)MASK_4bit);
    register __m512i total;
    register __m512i shuf;

#pragma unroll(8)
    for (i = 0; i < n; i+=8) {
        shuf = _mm512_load_epi32(&buf[i]);
        _mm_prefetch((const char *)&buf[i+256], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
        total = _mm512_setzero_epi32();

        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(shuf, mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 4),  mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 8),  mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 12), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 16), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 20), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 24), mask), popcnt), total);
        total = _mm512_add_epi32(_mm512_permutevar_epi32(_mm512_and_epi32(_mm512_srli_epi32(shuf, 28), mask), popcnt), total);

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, so it is
           emulated with multiple instructions. */
        result += _mm512_reduce_add_epi32(total);
    }

    return result;
}

__attribute__((aligned(64))) static const unsigned magic[] = {
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x55555555, 0x55555555, 0x55555555, 0x55555555,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x33333333, 0x33333333, 0x33333333, 0x33333333,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
        0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF
    };

inline uint64_t vpu_popcount2(uint64_t* buf, size_t n)  {
    register size_t result = 0;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16));
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32));
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48));
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64));
    register __m512i total;
    register __m512i shuf;

#pragma unroll(8)
    for (i = 0; i < n; i+=8) {
        shuf = _mm512_load_epi32(&buf[i]);
        _mm_prefetch((const char *)&buf[i+512], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
        total = _mm512_sub_epi32(shuf, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf,1)));
        total = _mm512_add_epi32(_mm512_and_epi32(B1, total), _mm512_and_epi32(B1,_mm512_srli_epi32(total,2)));
        total = _mm512_and_epi32(B2, _mm512_add_epi32(total, _mm512_srli_epi32(total,4)));
        total = _mm512_and_epi32(B3, _mm512_add_epi32(total, _mm512_srli_epi32(total,8)));
        total = _mm512_and_epi32(B4, _mm512_add_epi32(total, _mm512_srli_epi32(total,16)));

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, so it is
           emulated with multiple instructions. */
        result += _mm512_reduce_add_epi32(total);
    }

    return result;
}

inline uint64_t vpu_popcount3(uint64_t* buf, size_t n)  {
    register size_t result = 0;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16));
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32));
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48));
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64));
    register __m512i total;
    register __m512i shuf;

#pragma unroll(4)
    for (i = 0; i < n; i+=16) {
        shuf = _mm512_load_epi32(&buf[i]);
        result += _mm_countbits_64(buf[i+8]);
        _mm_prefetch((const char *)&buf[i+512], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+576], _MM_HINT_T1); // vprefetch1
        result += _mm_countbits_64(buf[i+9]);
        _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+128], _MM_HINT_T0); // vprefetch0
        total = _mm512_sub_epi32(shuf, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf,1)));
        result += _mm_countbits_64(buf[i+10]);
        total = _mm512_add_epi32(_mm512_and_epi32(B1, total), _mm512_and_epi32(B1,_mm512_srli_epi32(total,2)));
        result += _mm_countbits_64(buf[i+11]);
        total = _mm512_and_epi32(B2, _mm512_add_epi32(total, _mm512_srli_epi32(total,4)));
        result += _mm_countbits_64(buf[i+12]);
        total = _mm512_and_epi32(B3, _mm512_add_epi32(total, _mm512_srli_epi32(total,8)));
        result += _mm_countbits_64(buf[i+13]);
        total = _mm512_and_epi32(B4, _mm512_add_epi32(total, _mm512_srli_epi32(total,16)));
        result += _mm_countbits_64(buf[i+14]);

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, so it is
           emulated with multiple instructions. */
        result += _mm512_reduce_add_epi32(total);
        result += _mm_countbits_64(buf[i+15]);
    }

    return result;
}

/* These use the VPU's (or SSE's) scalar popcount intrinsics; CPUs that do
 * not support them fall back to the compiler's own implementation, whose
 * speed varies. */
static inline size_t scalar_popcountu(unsigned *buf, size_t n) {
  register size_t cnt = 0;
  size_t i;
#pragma vector always
#pragma unroll(8)
  for (i = 0; i < n; i++) {
    cnt += _mm_countbits_32(buf[i]);
    _mm_prefetch((const char *)&buf[i+512], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
  }
  return cnt;
}

static inline size_t scalar_popcountlu(uint64_t *buf, size_t n) {
  register size_t cnt = 0;
  size_t i;
#pragma vector always
#pragma unroll(8)
  for (i = 0; i < n; i++) {
    cnt += _mm_countbits_64(buf[i]);
    _mm_prefetch((const char *)&buf[i+512], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[i+64], _MM_HINT_T0); // vprefetch0
  }
  return cnt;
}
#endif

A summary of the code with OpenMP support can be downloaded from https://www.dropbox.com/sh/b3sfqps19wa2oi4/iFQ9wQ1NTg

The code was compiled with Intel C/C++ Compiler XE 13 using the command:

icc -debug inline-debug-info -O3 -mmic -fno-alias -ansi-alias -opt-streaming-stores always -ipo popcnt-mmic.cpp -o popcnt-mmic -vec-report=2 -openmp

The code runs natively on the coprocessor (61 cores) with 122 threads and balanced thread affinity, set via:

export OMP_NUM_THREADS=122;export KMP_AFFINITY=balanced

I am testing on a Xeon Phi SE10p (B1 stepping, CentOS 6.4), over 28 MB of junk data filled by rand(), iterating 10,000 times. The performance is as follows:

Buffer allocated at: 0x7f456b000000
OpenMP scalar_popcountu       4310169 us; cnt = 28439328
OpenMP scalar_popcountlu      1421139 us; cnt = 28439328
OpenMP vpu_popcount           1489992 us; cnt = 28439328
OpenMP vpu_popcount2          1109530 us; cnt = 28439328
OpenMP vpu_popcount3           951122 us; cnt = 28439328
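
For reference, these timings come from an OpenMP driver that hands each thread a slice of the buffer and sums the per-thread counts; the actual harness is in the Dropbox link above. A minimal sketch of such a driver, with hypothetical names, might look like this (it assumes n_u64 is a multiple of nthreads * 32, so every chunk satisfies the widest stride and alignment used by the kernels):

#include <omp.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical driver sketch: split the buffer evenly across the OpenMP
 * threads and sum the per-thread counts. */
static uint64_t omp_popcount(uint64_t *buf, size_t n_u64,
                             uint64_t (*kernel)(uint64_t *, size_t)) {
    uint64_t total = 0;
    int nthreads = omp_get_max_threads();
    size_t chunk = n_u64 / nthreads;
#pragma omp parallel for reduction(+:total)
    for (int t = 0; t < nthreads; t++)
        total += kernel(buf + (size_t)t * chunk, chunk);
    return total;
}

Inside the timing loop this would be called as, e.g., omp_popcount(buf, n, vpu_popcount2), once per iteration.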

"scalar_popcountu" and "scalar_popcountlu" use the "_mm_countbits_32" and "_mm_countbits_64" intrinsics, respectively, which exploit the scalar "popcnt" instruction. Setting "#pragma vector always" asks the compiler to vectorize the loads and the summation as 16 unsigned ints or 8 unsigned longs, although the popcount itself remains a scalar instruction.
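
On hosts that do not provide the _mm_countbits intrinsics, a functionally equivalent scalar loop can fall back to the compiler builtin; a minimal sketch, assuming GCC/ICC where __builtin_popcountll is available:

#include <stdint.h>
#include <stddef.h>

/* Portable scalar fallback (sketch): __builtin_popcountll maps to the
 * hardware popcnt instruction where available, otherwise to a compiler-
 * generated bit-twiddling sequence. */
static inline uint64_t scalar_popcount_portable(const uint64_t *buf, size_t n) {
    uint64_t cnt = 0;
    for (size_t i = 0; i < n; i++)
        cnt += (uint64_t)__builtin_popcountll(buf[i]);
    return cnt;
}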

The implementation of vpu_popcount1 is similar to the SSSE3 popcount implementation at http://wm.ite.pl/articles/sse-popcount.html. However, 1) Xeon Phi does not support packed-byte operations on integers (the smallest granularity is a double-word, i.e. 32 bits), and 2) it does not implement a "packed sum of absolute differences" instruction (such as _mm_sad_epu8 in SSSE3), so the reduce-add has to be performed by groups of "vpermf32x4", "vpaddd" and "movslq". As a result, this implementation generates far more instructions than the original SSSE3 version.
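
For comparison, here is a minimal sketch of the SSSE3 kernel that article describes, processing one 128-bit block. This is host-side illustration only, not MIC code; it shows the byte-granular PSHUFB lookup and the PSADBW reduction that the Phi's VPU lacks:

#include <tmmintrin.h>  /* SSSE3 */
#include <stdint.h>

/* Count the set bits in one 128-bit block: PSHUFB performs the 4-bit table
 * lookup on all 16 bytes at once, PSADBW sums the byte counts into two
 * 64-bit halves. */
static inline uint64_t ssse3_popcount_block(__m128i v) {
    const __m128i lut  = _mm_setr_epi8(0,1,1,2, 1,2,2,3, 1,2,2,3, 2,3,3,4);
    const __m128i mask = _mm_set1_epi8(0x0F);
    __m128i lo  = _mm_shuffle_epi8(lut, _mm_and_si128(v, mask));
    __m128i hi  = _mm_shuffle_epi8(lut, _mm_and_si128(_mm_srli_epi16(v, 4), mask));
    __m128i sad = _mm_sad_epu8(_mm_add_epi8(lo, hi), _mm_setzero_si128());
    return (uint64_t)_mm_cvtsi128_si64(sad)
         + (uint64_t)_mm_cvtsi128_si64(_mm_unpackhi_epi64(sad, sad));
}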

The implementation of vpu_popcount2 is similar to the SSE2 popcount implementation (refer to Hacker's Delight). It generates fewer instructions than vpu_popcount1 and is about 30% faster. Still, the tedious reduce-add cannot be avoided.
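
For reference, the same Hacker's Delight sequence in scalar form; vpu_popcount2 simply applies it to sixteen 32-bit lanes at once, and the masks correspond to B0 through B4 loaded from the magic table:

#include <stdint.h>

/* Scalar Hacker's Delight popcount for one 32-bit word (reference only). */
static inline uint32_t popcount32_hd(uint32_t x) {
    x = x - ((x >> 1) & 0x55555555u);                 /* B0: 2-bit field sums  */
    x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u); /* B1: 4-bit field sums  */
    x = (x + (x >> 4)) & 0x0F0F0F0Fu;                 /* B2: 8-bit field sums  */
    x = (x + (x >> 8)) & 0x00FF00FFu;                 /* B3: 16-bit field sums */
    x = (x + (x >> 16)) & 0x0000FFFFu;                /* B4: full 32-bit count */
    return x;
}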

The implementation of vpu_popcount3 is very specific to Xeon Phi. By mixing vector and scalar operations, it is about 15% faster than vpu_popcount2 (in my implementation the interleaving of scalar operations among the vector operations is casual; one could rearrange the scalar operations according to the assembly generated by the compiler, but the expected improvement is limited as far as I can tell). The improvement rests on two observations: 1) Xeon Phi schedules instructions in order, and 2) two scalar instructions, or one vector plus one scalar instruction, can be issued per clock cycle. I reduced the unroll factor from 8 to 4 to avoid saturating the register file.

The explicit prefetching in each function, from memory to L2 eight loop iterations ahead and from L2 to L1 one iteration ahead, raised the L1 hit rate from 0.38 to 0.994.
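
To make those distances concrete, here is my reading of vpu_popcount2 above (an interpretation, not part of the original code; the indices are in uint64_t units, i.e. 8-byte steps):

/* One unrolled block of the "i += 8" loop under #pragma unroll(8) consumes
 * 8 * 8 = 64 uint64_t = 512 bytes, so:
 *
 *   _mm_prefetch((const char *)&buf[i +  64], _MM_HINT_T0);  //  512 B ahead = 1 unrolled block -> L1
 *   _mm_prefetch((const char *)&buf[i + 512], _MM_HINT_T1);  // 4096 B ahead = 8 unrolled blocks -> L2
 */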

Unrolling does improve performance, by about 15%. That is counter-intuitive, since Xeon Phi schedules in order, but unrolling lets the icc compiler do as much compile-time scheduling as possible.

Are there any more techniques to boost the performance?

Two faster pieces of code from Brian Nickerson:

OpenMP vpu_popcount2          1110737 us; cnt = 28439328
OpenMP vpu_popcount3           951459 us; cnt = 28439328
OpenMP vpu_popcount3_r         815126 us; cnt = 28439328
OpenMP vpu_popcount5           746852 us; cnt = 28439328

vpu_popcount3_revised:

inline uint64_t vpu_popcount3_revised(uint64_t* buf, size_t n) {
  _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
  _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
  _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
  _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
  register size_t result;
  size_t i;

  register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));
  register const __m512i B1 = _mm512_load_epi32((void*)(magic+16));
  register const __m512i B2 = _mm512_load_epi32((void*)(magic+32));
  register const __m512i B3 = _mm512_load_epi32((void*)(magic+48));
  register const __m512i B4 = _mm512_load_epi32((void*)(magic+64));
  register __m512i total0;
  register __m512i total1;
  register __m512i shuf0;
  register __m512i shuf1;
  register __m512i result0;
  register __m512i result1;

  result0 = _mm512_setzero_epi32();
  result1 = _mm512_setzero_epi32();

  for (i = 0; i < n; i+=16) {
      shuf0 = _mm512_load_epi32(&buf[i  ]);
      shuf1 = _mm512_load_epi32(&buf[i+8]);
      _mm_prefetch((const char *)&buf[i+128], _MM_HINT_T1); // vprefetch1
      _mm_prefetch((const char *)&buf[i+136], _MM_HINT_T1); // vprefetch1
      _mm_prefetch((const char *)&buf[i+16], _MM_HINT_T0); // vprefetch0
      _mm_prefetch((const char *)&buf[i+24], _MM_HINT_T0); // vprefetch0
      total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));
      total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
      total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2)));
      total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
      total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));
      total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
      total0 = _mm512_and_epi32(B3, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8)));
      total1 = _mm512_and_epi32(B3, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8)));
      total0 = _mm512_and_epi32(B4, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));
      total1 = _mm512_and_epi32(B4, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
      result0 = _mm512_add_epi32(result0,total0);
      result1 = _mm512_add_epi32(result1,total1);

  }

  result0 = _mm512_add_epi32(result0,result1);
  result  = _mm512_reduce_add_epi32(result0);

  return result;
}

vpu_popcount5:

inline uint64_t vpu_popcount5(uint64_t* buf, size_t n)  {
    _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[128], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[136], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[144], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[152], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[160], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[168], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[176], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[184], _MM_HINT_T1); // vprefetch1
    register size_t result;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16));
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32));
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48));
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64));
    register const __m512i B6 = _mm512_load_epi32((void*)(magic+80));
    register __m512i total0;
    register __m512i total1;
    register __m512i total2;
    register __m512i total3;
    register __m512i shuf0;
    register __m512i shuf1;
    register __m512i shuf2;
    register __m512i shuf3;
    register __m512i result0;
    register __m512i result1;

    result0 = _mm512_setzero_epi32();
    result1 = _mm512_setzero_epi32();

    for (i = 0; i < n; i+=32) {
            shuf0 = _mm512_load_epi32(&buf[i   ]);
            shuf1 = _mm512_load_epi32(&buf[i+ 8]);
            shuf2 = _mm512_load_epi32(&buf[i+16]);
            shuf3 = _mm512_load_epi32(&buf[i+24]);
            _mm_prefetch((const char *)&buf[i+192], _MM_HINT_T1); // vprefetch1
            _mm_prefetch((const char *)&buf[i+200], _MM_HINT_T1); // vprefetch1
            _mm_prefetch((const char *)&buf[i+208], _MM_HINT_T1); // vprefetch1
            _mm_prefetch((const char *)&buf[i+216], _MM_HINT_T1); // vprefetch1
            _mm_prefetch((const char *)&buf[i+32], _MM_HINT_T0); // vprefetch0
            _mm_prefetch((const char *)&buf[i+40], _MM_HINT_T0); // vprefetch0
            _mm_prefetch((const char *)&buf[i+48], _MM_HINT_T0); // vprefetch0
            _mm_prefetch((const char *)&buf[i+56], _MM_HINT_T0); // vprefetch0
            total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));                        //  max value in nn is 10
            total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
            total2 = _mm512_sub_epi32(shuf2, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf2,1)));
            total3 = _mm512_sub_epi32(shuf3, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf3,1)));
            total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2))); //  max value in nnnn is 0100
            total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
            total2 = _mm512_add_epi32(_mm512_and_epi32(B1, total2), _mm512_and_epi32(B1,_mm512_srli_epi32(total2,2)));
            total3 = _mm512_add_epi32(_mm512_and_epi32(B1, total3), _mm512_and_epi32(B1,_mm512_srli_epi32(total3,2)));
            total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));                      //  max value in 0000nnnn is 00001000
            total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
            total2 = _mm512_and_epi32(B2, _mm512_add_epi32(total2, _mm512_srli_epi32(total2,4)));
            total3 = _mm512_and_epi32(B2, _mm512_add_epi32(total3, _mm512_srli_epi32(total3,4)));
            total0 = _mm512_add_epi32(total0, total1);                                                                 //  max value in 000nnnnn is 00010000
            total1 = _mm512_add_epi32(total2, total3);
            total0 = _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8));                                            //  max value in xxxxxxxx00nnnnnn is 00100000
            total1 = _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8));
            total0 = _mm512_and_epi32(B6, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));                     //  max value in each element is 01000000, i.e. 64
            total1 = _mm512_and_epi32(B6, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
            result0 = _mm512_add_epi32(result0,total0);
            result1 = _mm512_add_epi32(result1,total1);
    }

    result0 = _mm512_add_epi32(result0,result1);
    result  = _mm512_reduce_add_epi32(result0);

    return result;
}

2 Answers


Since posting yesterday, I have been able to run both your code and my suggestion on my own card. I do not get exactly the same timings as you, probably because of my hardware stepping and perhaps my compiler version, but the trend holds: my suggestion seems to achieve roughly a 15% performance boost.

I got a further small performance boost, between 5% and 10%, with the tweaks shown in the code below. Note that in the following snippet B6 has each element set to 0x000000FF. At this point I think the algorithm may be approaching the maximum sustainable bandwidth from GDDR to the L2 cache.

(Additional remark: one piece of evidence for this assertion is that if I wrap the body of the popcount5 function in a for loop that repeats it ten times (note that this is ten quick repetitions over a chunk_size worth of input data, so nine of them run while the data is piping hot in L2), the total time of the test only grows by a factor of about 5 rather than 10. I bring this up because I think your goal is to tune the speed of the bit-counting logic, but perhaps the application in which you intend to deploy it actually has a smaller and/or hotter working set. If so, the throttling introduced by DRAM-to-L2 bandwidth clouds the picture. Note, however, that shrinking your test input so that it stays hotter in L2 seems to let other overheads (probably OpenMP overhead) become relatively more significant.)

inline uint64_t vpu_popcount5(uint64_t* buf, size_t n)  {
    _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[128], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[136], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[144], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[152], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[160], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[168], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[176], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[184], _MM_HINT_T1); // vprefetch1
    register size_t result;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16));
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32));
    register const __m512i B6 = _mm512_load_epi32((void*)(magic+80));
    register __m512i total0;
    register __m512i total1;
    register __m512i total2;
    register __m512i total3;
    register __m512i shuf0;
    register __m512i shuf1;
    register __m512i shuf2;
    register __m512i shuf3;
    register __m512i result0;
    register __m512i result1;

    result0 = _mm512_setzero_epi32();
    result1 = _mm512_setzero_epi32();

    for (i = 0; i < n; i+=32) {
        shuf0 = _mm512_load_epi32(&buf[i   ]);
        shuf1 = _mm512_load_epi32(&buf[i+ 8]);
        shuf2 = _mm512_load_epi32(&buf[i+16]);
        shuf3 = _mm512_load_epi32(&buf[i+24]);
        _mm_prefetch((const char *)&buf[i+192], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+200], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+208], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+216], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+32], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+40], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+48], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+56], _MM_HINT_T0); // vprefetch0
        total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));                        //  max value in nn is 10
        total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
        total2 = _mm512_sub_epi32(shuf2, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf2,1)));
        total3 = _mm512_sub_epi32(shuf3, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf3,1)));
        total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2))); //  max value in nnnn is 0100
        total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
        total2 = _mm512_add_epi32(_mm512_and_epi32(B1, total2), _mm512_and_epi32(B1,_mm512_srli_epi32(total2,2)));
        total3 = _mm512_add_epi32(_mm512_and_epi32(B1, total3), _mm512_and_epi32(B1,_mm512_srli_epi32(total3,2)));
        total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));                      //  max value in 0000nnnn is 00001000
        total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
        total2 = _mm512_and_epi32(B2, _mm512_add_epi32(total2, _mm512_srli_epi32(total2,4)));
        total3 = _mm512_and_epi32(B2, _mm512_add_epi32(total3, _mm512_srli_epi32(total3,4)));
        total0 = _mm512_add_epi32(total0, total1);                                                                 //  max value in 000nnnnn is 00010000
        total1 = _mm512_add_epi32(total2, total3);
        total0 = _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8));                                            //  max value in xxxxxxxx00nnnnnn is 00100000
        total1 = _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8));
        total0 = _mm512_and_epi32(B6, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));                     //  max value in each element is 01000000, i.e. 64
        total1 = _mm512_and_epi32(B6, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
        result0 = _mm512_add_epi32(result0,total0);
        result1 = _mm512_add_epi32(result1,total1);

        /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
           is not implemented as a single instruction in VPUv1, so it is
           emulated with multiple instructions. */
    }

    result0 = _mm512_add_epi32(result0,result1);
    result  = _mm512_reduce_add_epi32(result0);

    return result;
}
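
As an aside, the ten-repeat experiment mentioned above can be sketched as a wrapper around the kernel rather than literally duplicating its body; a hypothetical version (the returned count is simply ten times the real answer, which does not matter for timing purposes):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical wrapper for the L2-residency experiment: the first pass
 * streams the chunk in from DRAM, the remaining nine passes find it hot
 * in L2, so a ~5x (rather than 10x) increase in total time suggests the
 * single-pass version is bandwidth-bound. */
static uint64_t vpu_popcount5_x10(uint64_t *buf, size_t chunk_size) {
    uint64_t cnt = 0;
    for (int rep = 0; rep < 10; rep++)
        cnt += vpu_popcount5(buf, chunk_size);
    return cnt;
}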
Answered 2013-05-30 19:44:51

Will you please try the following variant, and report back whether this improves performance for you? I am addressing several points that I think aren't quite optimal in your coding:

  • I don't think your prefetching distances were quite right. It looked to me like you might have been thinking of byte offset distances when the indexing was really in terms of uint64's.
  • I see no reason to do the reduction operation every iteration of the loop. You can do partial accumulations of bit counts in the 16 SIMD elements, and then do a single reduction outside the loop.
  • I do not think that doing scalar-side popcount instructions is as advantageous as really getting the best of VPU scheduling. Focusing on an excellent VPU schedule is of highest importance. I also do not think that the scalar popcount instruction actually pairs with a vector operation; i.e. I think it is supported only in the U-pipe.

inline uint64_t vpu_popcount3_revised(uint64_t* buf, size_t n) {
    _mm_prefetch((const char *)&buf[0], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[8], _MM_HINT_T0); // vprefetch0
    _mm_prefetch((const char *)&buf[16], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[24], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[32], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[40], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[48], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[56], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[64], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[72], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[80], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[88], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[96], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[104], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[112], _MM_HINT_T1); // vprefetch1
    _mm_prefetch((const char *)&buf[120], _MM_HINT_T1); // vprefetch1
    register size_t result;
    size_t i;

    register const __m512i B0 = _mm512_load_epi32((void*)(magic+0));
    register const __m512i B1 = _mm512_load_epi32((void*)(magic+16));
    register const __m512i B2 = _mm512_load_epi32((void*)(magic+32));
    register const __m512i B3 = _mm512_load_epi32((void*)(magic+48));
    register const __m512i B4 = _mm512_load_epi32((void*)(magic+64));
    register __m512i total0;
    register __m512i total1;
    register __m512i shuf0;
    register __m512i shuf1;
    register __m512i result0;
    register __m512i result1;

    result0 = _mm512_setzero_epi32();
    result1 = _mm512_setzero_epi32();

    for (i = 0; i < n; i+=16) {
        shuf0 = _mm512_load_epi32(&buf[i  ]);
        shuf1 = _mm512_load_epi32(&buf[i+8]);
        _mm_prefetch((const char *)&buf[i+128], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+136], _MM_HINT_T1); // vprefetch1
        _mm_prefetch((const char *)&buf[i+16], _MM_HINT_T0); // vprefetch0
        _mm_prefetch((const char *)&buf[i+24], _MM_HINT_T0); // vprefetch0
        total0 = _mm512_sub_epi32(shuf0, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf0,1)));
        total1 = _mm512_sub_epi32(shuf1, _mm512_and_epi32(B0, _mm512_srli_epi32(shuf1,1)));
        total0 = _mm512_add_epi32(_mm512_and_epi32(B1, total0), _mm512_and_epi32(B1,_mm512_srli_epi32(total0,2)));
        total1 = _mm512_add_epi32(_mm512_and_epi32(B1, total1), _mm512_and_epi32(B1,_mm512_srli_epi32(total1,2)));
        total0 = _mm512_and_epi32(B2, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,4)));
        total1 = _mm512_and_epi32(B2, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,4)));
        total0 = _mm512_and_epi32(B3, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,8)));
        total1 = _mm512_and_epi32(B3, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,8)));
        total0 = _mm512_and_epi32(B4, _mm512_add_epi32(total0, _mm512_srli_epi32(total0,16)));
        total1 = _mm512_and_epi32(B4, _mm512_add_epi32(total1, _mm512_srli_epi32(total1,16)));
        result0 = _mm512_add_epi32(result0,total0);
        result1 = _mm512_add_epi32(result1,total1);

    }

    /* Reduce add, which is analogous to SSSE3's PSADBW instruction,
       is not implemented as a single instruction in VPUv1, so it is
       emulated with multiple instructions. */

    result0 = _mm512_add_epi32(result0,result1);
    result  = _mm512_reduce_add_epi32(result0);

    return result;
}
Answered 2013-05-30 01:41:39