优化从子位掩码生成父位掩码

Optimize generating a parent bitmask from child bitmasks

本文关键字:掩码 优化      更新时间:2023-10-16

给定一个 64 位子掩码输入,例如:

10000000 01000000 00100000 00010000 00001000 00000100 00000010 00000000

8 位父掩码为:

11111110

父掩码中的单个位映射到子掩码字符串中的 8 位,当 8 个子掩码中的一个设置为 1 时,父掩码中的位设置为 1。计算此值的简单算法如下:

unsigned __int64 childMask = 0x8040201008040200; // The number above in hex
unsigned __int8 parentMask = 0;
for (int i = 0; i < 8; i++)
{
const unsigned __int8 child = childMask >> (8 * i);
parentMask |= (child > 0) << i;
}

我想知道上面的代码中是否还有任何优化要做。代码将在 CUDA 上运行,我想尽可能避免分支。对于答案,C++/C 中的代码可以正常工作。for 循环可以展开,但我宁愿将其留给编译器进行优化,并在必要时使用例如#pragma unroll提供提示。

一种可能的方法是使用 __vcmpgtu4 进行每字节比较,将结果作为打包掩码返回;再与 0x08040201(高半部分则用 0x80402010)进行按位与,把各字节的比较结果转换为最终结果中对应的位。但随后需要做一次水平求和(归约),这种操作似乎没有得到很好的硬件支持,不过可以用普通的 C 风格代码来完成。

例如

unsigned int low = childMask;
unsigned int high = childMask >> 32;
unsigned int lowmask = __vcmpgtu4(low, 0) & 0x08040201;
unsigned int highmask = __vcmpgtu4(high, 0) & 0x80402010;
unsigned int mask = lowmask | highmask;
mask |= mask >> 16;
mask |= mask >> 8;
parentMask = mask & 0xff;

这种基于经典位抖动技术的解决方案可能比 CUDA 支持的至少某些 GPU 架构上的公认答案更快,因为__vcmp*内部函数在所有架构上都不是很快。

由于GPU基本上是32位架构,因此64位childMask被处理为两半,hilo

处理包括三个步骤。第一步,我们将每个非空字节设置为0x80,否则保持字节不变。换句话说,如果字节不为零,我们设置每个字节的最高有效位。一种方法是使用1980年代设计的空字节检测算法Alan Mycroft的修改版本,该算法通常用于C字符串处理。或者,我们可以使用hadd (~0, x)只有在x != 0时才设置最高有效位的事实,其中hadd是减半加法:hadd (a, b) = (a + b) / 2而不会在中间计算中溢出。彼得·蒙哥马利(Peter L. Montgomery(于2000年发表了一个有效的实现。

在第二步中,我们将每个字节的最高有效位收集到最高的半字节中。为此,我们需要将位 7 移动到位 28,将位 15 移动到位 29,将位 23 移动到位 30,将位 31 移动到位 31,对应于 21、14、7 和 0 的偏移因子。为了避免单独的移位,我们将移位因子组合成一个"魔术"乘数,然后乘以该乘数,从而并行执行所有移位。

在第三步中,我们将包含结果的两个半字节组合在一起,并将它们移动到正确的位位置。对于 hi 字,这意味着将半字节 <31:28> 移动到位 <7:4>;对于 lo 字,这意味着将半字节 <31:28> 移动到位 <3:0>。这种组合可以通过按位 OR 或加法来执行;哪种变体更快可能取决于目标体系结构。

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define USE_HAROLDS_SOLUTION  (0)
#define USE_MYCROFT_ZEROBYTE  (0)
#define USE_TWO_MASKS         (1)
#define USE_ADD_COMBINATION   (1)
/* Compute an 8-bit parent mask from a 64-bit child mask: parent bit i is set
   iff byte i (i = 0 being the least significant byte) of childMask is
   non-zero. Branch-free SWAR implementation intended for 32-bit GPU
   architectures; the USE_* compile-time switches above select between
   functionally equivalent code variants. */
uint8_t parentMask (uint64_t childMask)
{
#if USE_TWO_MASKS
const uint32_t LSB_MASK = 0x01010101; /* bit 0 of every byte lane */
#endif // USE_TWO_MASKS
const uint32_t MSB_MASK = 0x80808080; /* bit 7 of every byte lane */
/* Magic multiplier combining left shifts by 21, 14, 7, and 0: moves the
   per-byte MSBs (bits 7, 15, 23, 31) into bits 28, 29, 30, 31 in one
   multiply instead of four separate shift/OR steps. */
const uint32_t MAGICMUL = (1 << 21) | (1 << 14) | (1 << 7) | (1 << 0);
uint32_t lo, hi;
/* split 64-bit argument into two halves for 32-bit GPU architecture */
lo = (uint32_t)(childMask >>  0);
hi = (uint32_t)(childMask >> 32);
#if USE_MYCROFT_ZEROBYTE
/* Set most significant bit in each byte that is not zero. Adapted from Alan 
Mycroft's null-byte detection algorithm (newsgroup comp.lang.c, 1987/04/08,
https://groups.google.com/forum/#!original/comp.lang.c/2HtQXvg7iKc/xOJeipH6KLMJ):
null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
*/
#if USE_TWO_MASKS
lo = (((lo | MSB_MASK) - LSB_MASK) | lo) & MSB_MASK;
hi = (((hi | MSB_MASK) - LSB_MASK) | hi) & MSB_MASK;
#else // USE_TWO_MASKS
lo = (((lo & ~MSB_MASK) + ~MSB_MASK) | lo) & MSB_MASK;
hi = (((hi & ~MSB_MASK) + ~MSB_MASK) | hi) & MSB_MASK;
#endif // USE_TWO_MASKS
#else // USE_MYCROFT_ZEROBYTE
/* Set most significant bit in each byte that is not zero. Use hadd(~0,x).
Peter L. Montgomery's observation (newsgroup comp.arch, 2000/02/11,
https://groups.google.com/d/msg/comp.arch/gXFuGZtZKag/_5yrz2zDbe4J):
(A+B)/2 = (A AND B) + (A XOR B)/2.
Masking before the shift/add keeps carries from crossing byte boundaries. */
#if USE_TWO_MASKS
lo = (((~lo & ~LSB_MASK) >> 1) + lo) & MSB_MASK;
hi = (((~hi & ~LSB_MASK) >> 1) + hi) & MSB_MASK;
#else // USE_TWO_MASKS
lo = (((~lo >> 1) & ~MSB_MASK) + lo) & MSB_MASK;
hi = (((~hi >> 1) & ~MSB_MASK) + hi) & MSB_MASK;
#endif // USE_TWO_MASKS
#endif // USE_MYCROFT_ZEROBYTE
/* collect most significant bit of each byte in most significant nibble */
lo = lo * MAGICMUL;
hi = hi * MAGICMUL;
/* combine nibbles with results for high and low half into final result:
   hi's nibble <31:28> lands in bits <7:4>, lo's in bits <3:0>. OR and ADD
   are equivalent here because the two nibble fields do not overlap. */
#if USE_ADD_COMBINATION    
return (uint8_t)((hi >> 24) + (lo >> 28));
#else // USE_ADD_COMBINATION
return (uint8_t)((hi >> 24) | (lo >> 28));
#endif // USE_ADD_COMBINATION
}
/* Reference implementation: walk the eight child bytes and set the
   corresponding parent bit for every byte that contains any set bit. */
uint8_t parentMask_ref (uint64_t childMask)
{
    uint8_t result = 0;
    for (uint32_t byteIdx = 0; byteIdx < 8; byteIdx++) {
        const uint8_t byteVal = (uint8_t)(childMask >> (8 * byteIdx));
        if (byteVal != 0) {
            result |= (uint8_t)(1u << byteIdx);
        }
    }
    return result;
}
/* Broadcast the most significant bit of each byte lane across that whole
   lane: a byte with bit 7 set becomes 0xff, otherwise 0x00. */
uint32_t build_mask (uint32_t a)
{
    const uint32_t msb_per_lane = (a >> 7) & 0x01010101;
    return msb_per_lane * 0xff;
}
/* Per-byte unsigned compare: for each byte lane, yield 0xff when the lane
   of a is greater than the lane of b, else 0x00. Emulates the CUDA
   __vcmpgtu4() intrinsic with a carry-contained SWAR addition whose
   per-lane MSB holds the a > b indicator, then broadcasts that MSB
   across its lane. */
uint32_t vcmpgtu4 (uint32_t a, uint32_t b)
{
    const uint32_t nb = ~b;
    /* (a & nb) + avg-style half-sum; masking with 0x7f7f7f7f keeps the
       shifted partial sum from carrying across byte boundaries */
    const uint32_t sum = (a & nb) + (((a ^ nb) >> 1) & 0x7f7f7f7f);
    /* replicate each lane's MSB over the whole lane (0x80 -> 0xff) */
    return ((sum & 0x80808080) >> 7) * 0xff;
}
/* harold's approach: a per-byte greater-than-zero compare yields a
   0xff/0x00 mask per byte; ANDing with position-encoded constants leaves
   exactly one result bit per byte, and two shift-OR folds gather those
   bits into the low byte. */
uint8_t parentMask_harold (uint64_t childMask)
{
    const uint32_t loHalf = (uint32_t)childMask;
    const uint32_t hiHalf = (uint32_t)(childMask >> 32);
    uint32_t bits = (vcmpgtu4 (loHalf, 0) & 0x08040201) |
                    (vcmpgtu4 (hiHalf, 0) & 0x80402010);
    bits |= bits >> 16;
    bits |= bits >> 8;
    return (uint8_t)bits;
}
/*
From: geo <gmars...@gmail.com>
Newsgroups: sci.math,comp.lang.c,comp.lang.fortran
Subject: 64-bit KISS RNGs
Date: Sat, 28 Feb 2009 04:30:48 -0800 (PST)
This 64-bit KISS RNG has three components, each nearly
good enough to serve alone.    The components are:
Multiply-With-Carry (MWC), period (2^121+2^63-1)
Xorshift (XSH), period 2^64-1
Congruential (CNG), period 2^64
*/
/* State of George Marsaglia's 64-bit KISS PRNG (see comment block above).
   NOTE: the original paste had the backslash line continuations stripped
   from the multi-line macro definitions, which made them fail to compile;
   they are restored here. */
static uint64_t kiss64_x = 1234567890987654321ULL;
static uint64_t kiss64_c = 123456123456123456ULL;
static uint64_t kiss64_y = 362436362436362436ULL;
static uint64_t kiss64_z = 1066149217761810ULL;
static uint64_t kiss64_t;
/* Multiply-With-Carry component, period (2^121+2^63-1) */
#define MWC64  (kiss64_t = (kiss64_x << 58) + kiss64_c, \
                kiss64_c = (kiss64_x >> 6), kiss64_x += kiss64_t, \
                kiss64_c += (kiss64_x < kiss64_t), kiss64_x)
/* Xorshift component, period 2^64-1 */
#define XSH64  (kiss64_y ^= (kiss64_y << 13), kiss64_y ^= (kiss64_y >> 17), \
                kiss64_y ^= (kiss64_y << 43))
/* Congruential component, period 2^64 */
#define CNG64  (kiss64_z = 6906969069ULL * kiss64_z + 1234567ULL)
/* Combined KISS generator: sum of all three components */
#define KISS64 (MWC64 + XSH64 + CNG64)
/* Exhaustive-style randomized test harness: feed random 64-bit child masks
   from the KISS64 PRNG through the SWAR implementation (or harold's
   variant) and compare against the straightforward reference; runs until a
   mismatch is found (returns EXIT_FAILURE) or the process is interrupted.
   NOTE: the original paste had the backslashes stripped from the "\n" and
   "\r" escape sequences in the printf format strings; restored here. */
int main (void)
{
    uint64_t childMask, count = 0;
    uint8_t res, ref;
    do {
        childMask = KISS64;
        ref = parentMask_ref (childMask);
#if USE_HAROLDS_SOLUTION
        res = parentMask_harold (childMask);
#else // USE_HAROLDS_SOLUTION
        res = parentMask (childMask);
#endif // USE_HAROLDS_SOLUTION
        if (res != ref) {
            /* cast: uint64_t is not necessarily unsigned long long */
            printf ("\narg=%016llx  res=%02x  ref=%02x\n",
                    (unsigned long long)childMask, res, ref);
            return EXIT_FAILURE;
        }
        /* progress indicator every 2^24 iterations; flush because "\r"
           leaves the line unterminated and stdout may be fully buffered */
        if (!(count & 0xffffff)) {
            printf ("\r%llu", (unsigned long long)count);
            fflush (stdout);
        }
        count++;
    } while (1);
    return EXIT_SUCCESS;
}