
I decided to keep working on FAST corner optimisation and got stuck on the _mm_movemask_epi8 SSE instruction. How can I rewrite it for ARM NEON with a uint8x16_t input?


5 Answers


I know this post is old, but I thought it would be useful to share my (verified) solution. It assumes that every lane of the Input parameter is either all ones or all zeros.

const uint8_t __attribute__ ((aligned (16))) _Powers[16]= 
    { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };

// Set the powers of 2 (do it once for all, if applicable)
uint8x16_t Powers= vld1q_u8(_Powers);

// Compute the mask from the input
uint64x2_t Mask= vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8(Input, Powers))));

// Get the resulting bytes
uint16_t Output;
vst1q_lane_u8((uint8_t*)&Output + 0, (uint8x16_t)Mask, 0);
vst1q_lane_u8((uint8_t*)&Output + 1, (uint8x16_t)Mask, 8);

(Mind http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47553, anyway.)

Similarly to Michael's answer, the trick is to form the powers of two for the indices of the non-zero entries and to sum them pairwise three times. This has to be done with increasing data size so that the stride doubles on every addition: you reduce from 2 x 8 8-bit entries to 2 x 4 16-bit ones, then 2 x 2 32-bit, and finally 2 x 1 64-bit. The low byte of each of these two numbers gives the result. I don't think there is an easy way to pack them together into a single short value using NEON.

This takes 6 NEON instructions if the input is in a suitable form and the powers can be preloaded.
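
For reference, here is a minimal self-contained sketch of the snippet above folded into a function. The function name is mine, and vreinterpretq_u8_u64 replaces the plain cast so it does not rely on GCC's vector-cast extension; the all-ones/all-zeros assumption on the input still applies.

#include <arm_neon.h>
#include <stdint.h>

// Hypothetical wrapper around the snippet above; assumes every lane of
// Input is either 0x00 or 0xFF.
static inline uint16_t movemask_neon_u8(uint8x16_t Input)
{
    static const uint8_t powers[16] __attribute__ ((aligned (16))) =
        { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
    uint8x16_t Powers = vld1q_u8(powers);

    // Keep the power of two of every all-ones lane, then add pairwise
    // three times with widening: 16 x u8 -> 8 x u16 -> 4 x u32 -> 2 x u64.
    uint64x2_t Mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8(Input, Powers))));

    // The low byte of each 64-bit half holds one half of the 16-bit mask.
    uint8x16_t Bytes = vreinterpretq_u8_u64(Mask);
    return (uint16_t)(vgetq_lane_u8(Bytes, 0) | (vgetq_lane_u8(Bytes, 8) << 8));
}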

Answered 2012-09-12T07:58:43.757

The obvious solution seems to have been completely missed here.

// Use shifts to collect all of the sign bits.
// I'm not sure if this works on big endian, but big endian NEON is very
// rare.
int vmovmaskq_u8(uint8x16_t input)
{
    // Example input (half scale):
    // 0x89 FF 1D C0 00 10 99 33

    // Shift out everything but the sign bits
    // 0x01 01 00 01 00 00 01 00
    uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));

    // Merge the even lanes together with vsra. The '??' bytes are garbage.
    // vsri could also be used, but it is slightly slower on aarch64.
    // 0x??03 ??02 ??00 ??01
    uint32x4_t paired16 = vreinterpretq_u32_u16(
                              vsraq_n_u16(high_bits, high_bits, 7));
    // Repeat with wider lanes.
    // 0x??????0B ??????04
    uint64x2_t paired32 = vreinterpretq_u64_u32(
                              vsraq_n_u32(paired16, paired16, 14));
    // 0x??????????????4B
    uint8x16_t paired64 = vreinterpretq_u8_u64(
                              vsraq_n_u64(paired32, paired32, 28));
    // Extract the low 8 bits from each lane and join.
    // 0x4B
    return vgetq_lane_u8(paired64, 0) | ((int)vgetq_lane_u8(paired64, 8) << 8);
}
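
As a usage illustration (not part of the original answer; the data values and the threshold are made up, and it assumes the vmovmaskq_u8 above is in scope), this is how such a mask is typically consumed, e.g. to find the first lane that passes a comparison:

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    // 16 arbitrary sample bytes, chosen for illustration only.
    const uint8_t data[16] = { 3, 200, 5, 7, 180, 9, 11, 13,
                               15, 17, 19, 210, 23, 25, 27, 29 };
    uint8x16_t v = vld1q_u8(data);

    // Lanes >= 128 become 0xFF, the rest 0x00, like an SSE compare result.
    uint8x16_t ge = vcgeq_u8(v, vdupq_n_u8(128));

    int mask = vmovmaskq_u8(ge);   // bits 1, 4 and 11 set -> 0x812
    if (mask != 0)
        printf("first byte >= 128 at index %d\n", __builtin_ctz(mask));
    return 0;
}
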
Answered 2019-10-14T17:04:18.193

After some testing, the following code appears to work correctly:

int32_t _mm_movemask_epi8_neon(uint8x16_t input)
{
    // Negative counts in vshl_u8 shift right, so lane i ends up with its
    // most significant bit moved to bit position i.
    const int8_t __attribute__ ((aligned (16))) xr[8] = {-7,-6,-5,-4,-3,-2,-1,0};
    uint8x8_t mask_and = vdup_n_u8(0x80);
    int8x8_t mask_shift = vld1_s8(xr);

    // Process the two 64-bit halves separately.
    uint8x8_t lo = vget_low_u8(input);
    uint8x8_t hi = vget_high_u8(input);

    // Keep only the sign bit of each byte, then move it into place.
    lo = vand_u8(lo, mask_and);
    lo = vshl_u8(lo, mask_shift);

    hi = vand_u8(hi, mask_and);
    hi = vshl_u8(hi, mask_shift);

    // Three pairwise adds fold all eight lanes into lane 0.
    lo = vpadd_u8(lo,lo);
    lo = vpadd_u8(lo,lo);
    lo = vpadd_u8(lo,lo);

    hi = vpadd_u8(hi,hi);
    hi = vpadd_u8(hi,hi);
    hi = vpadd_u8(hi,hi);

    return ((hi[0] << 8) | (lo[0] & 0xFF));
}
Answered 2012-08-08T21:37:20.210

For aarch64, this question deserves a more up-to-date answer. The new features added in Armv8 allow the same thing to be done with fewer instructions. Here is my version:

uint32_t _mm_movemask_aarch64(uint8x16_t input)
{
    // Negative shift counts in vshlq_u8 shift right, moving the MSB of
    // byte i to bit position i within each 64-bit half. The shift vector
    // must be signed (int8x16_t) to express the negative counts.
    const int8_t __attribute__ ((aligned (16))) ucShift[] =
        {-7,-6,-5,-4,-3,-2,-1,0,-7,-6,-5,-4,-3,-2,-1,0};
    int8x16_t vshift = vld1q_s8(ucShift);
    uint8x16_t vmask = vandq_u8(input, vdupq_n_u8(0x80));
    uint32_t out;

    vmask = vshlq_u8(vmask, vshift);
    // vaddv_u8 (Armv8) horizontally adds the eight lanes of each half.
    out = vaddv_u8(vget_low_u8(vmask));
    out += (vaddv_u8(vget_high_u8(vmask)) << 8);

    return out;
}
Answered 2021-08-07T17:08:35.623

Note that I haven't tested any of this, but something like the following might work:

X := the vector that you want to create the mask from
A := 0x808080808080...
B := 0x00FFFEFDFCFB...  (i.e. 0,-1,-2,-3,...)

X = vand_u8(X, A);  // Keep d7 of each byte in X
X = vshl_u8(X, B);  // X[7]>>=0; X[6]>>=1; X[5]>>=2; ...
// Each byte of X now contains its msb shifted 7-N bits to the right, where N
// is the byte index.
// Do 3 pairwise adds in order to pack all these into X[0]
X = vpadd_u8(X, X); 
X = vpadd_u8(X, X); 
X = vpadd_u8(X, X);
// X[0] should now contain the mask. Clear the remaining bytes if necessary

This would need to be done twice to process a 128-bit vector, since vpadd only works on 64-bit vectors.
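
To make that last point concrete, here is a compilable sketch of the two-pass structure, under the same untested caveat (the function names are mine; this is essentially what the vpadd-based answer above does):

#include <arm_neon.h>
#include <stdint.h>

// Hypothetical helper: the 64-bit sequence sketched above
// (vand_u8 + vshl_u8 + three vpadd_u8), producing one bit per lane.
static inline uint8_t movemask_half(uint8x8_t x)
{
    static const int8_t shifts[8] = { -7, -6, -5, -4, -3, -2, -1, 0 };
    x = vand_u8(x, vdup_n_u8(0x80));   // keep bit 7 of every byte
    x = vshl_u8(x, vld1_s8(shifts));   // move it to bit position N
    x = vpadd_u8(x, x);                // three pairwise adds fold
    x = vpadd_u8(x, x);                // everything into lane 0
    x = vpadd_u8(x, x);
    return vget_lane_u8(x, 0);
}

// Run the 64-bit routine twice to cover a full 128-bit vector.
static inline uint16_t movemask_u8x16(uint8x16_t x)
{
    return (uint16_t)(movemask_half(vget_low_u8(x))
                     | (movemask_half(vget_high_u8(x)) << 8));
}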

Answered 2012-08-08T20:04:40.220