Say I have 8 32-bit registers:
A 0-31 E 0-31
B 0-31 F 0-31
C 0-31 G 0-31
D 0-31 H 0-31
And I want their bits rearranged like:
A' := A0 E0 A8 E8 A16 E16 A24 E24 B0 F0 B8 F8 B16 F16 B24 F24 C0 G0 ...etc. H24
B' := A1 E1 A9 E9 A17 E17 A25 E25 B1 F1 B9 F9 B17 F17 B25 F25 C1 G1 ...etc. H25
C' := A2 E2 A10 E10 A18 E18 A26 E26 B2 ... etc.
D' := ... etc.
E' := ... etc.
F' := ... etc.
G' := ... etc.
H' := ... etc.
What would be the most efficient way to compute this shuffling in either C or ARM assembly? (So no Intel with SSE, no 64-bit registers, and not enough registers to hold both input and output.) The calculator at http://programming.sirrida.de/calcperm.php is really nice, but it doesn't easily extend to multiple words. I believe it can be done more efficiently than the naive way of selecting one bit at a time.
If you make components A0 _ _ _ _ _ _ _ A8 _ _ _ _ _ _ _ A16 etc. (just trivial masking),
and similar for the other registers, you can easily make this:
A0 E0 B0 F0 C0 G0 D0 H0 A8 E8 ..
Which you can turn into the right order with two bit_permute_step's, as given by calcperm:
x = bit_permute_step(x, 0x00cc00cc, 6); // Bit index swap 1,3
x = bit_permute_step(x, 0x0000f0f0, 12); // Bit index swap 2,4
Similar story for the other registers, just offset a bit.
Essentially moving 4 bits at once, with a bit of fixup which only happens 8 times.
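For concreteness, here is a rough C sketch of this approach (untested; it assumes the question's listing is read LSB-first, i.e. A' bit 0 = A0, with in[0..7] = A..H and out[0..7] = A'..H', and it spells the packing shifts out naively rather than optimally):

#include <stdint.h>

static uint32_t bit_permute_step(uint32_t x, uint32_t m, unsigned shift) {
    uint32_t t = ((x >> shift) ^ x) & m;
    return x ^ t ^ (t << shift);
}

void shuffle_sketch(const uint32_t in[8], uint32_t out[8]) {
    for (int b = 0; b < 8; b++) {               /* b = 0..7 produces A'..H' */
        uint32_t m = 0x01010101u << b;          /* bits b, b+8, b+16, b+24  */
        uint32_t x = (((in[0] & m) >> b)     )  /* A */
                   | (((in[4] & m) >> b) << 1)  /* E */
                   | (((in[1] & m) >> b) << 2)  /* B */
                   | (((in[5] & m) >> b) << 3)  /* F */
                   | (((in[2] & m) >> b) << 4)  /* C */
                   | (((in[6] & m) >> b) << 5)  /* G */
                   | (((in[3] & m) >> b) << 6)  /* D */
                   | (((in[7] & m) >> b) << 7); /* H */
        /* x is now Ab Eb Bb Fb Cb Gb Db Hb  A(b+8) E(b+8) ... reading from bit 0 */
        x = bit_permute_step(x, 0x00cc00cc, 6);  /* Bit index swap 1,3 */
        x = bit_permute_step(x, 0x0000f0f0, 12); /* Bit index swap 2,4 */
        out[b] = x;                              /* Ab Eb A(b+8) E(b+8) ... */
    }
}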
; (pseudocode: lsl shifts the register's MSB into carry, rol rotates the destination left through carry)
; 1) Copy the topmost 8 bits of H into the lowest bits of the output registers:
lsl H   ; H.31 -> carry
rol H'  ; carry -> H'.0
lsl H   ; H.30 -> carry
rol G'  ; carry -> G'.0
lsl H
rol F'
...
lsl H   ; H.24 -> carry
rol A'  ; carry -> A'.0
; 2) go on with the top 8 bits of D
lsl D   ; D.31 -> carry
rol H'  ; H'.0 -> H'.1 and carry -> H'.0
lsl D
rol G'
...
lsl D
rol A'
Continue until all bits are in place. The last step is
lsl A   ; A.0 (by now shifted up to the MSB) -> carry
rol A'  ; A'.0 -> A'.1 -> A'.2 ... and carry -> A'.0
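A small C model of the same scheme (my reconstruction, untested; it assumes the question's listing is read LSB-first, processes the register pairs in the order H/D, G/C, F/B, E/A, and clobbers the inputs as it shifts them):

#include <stdint.h>

void shuffle_carry_model(uint32_t in[8], uint32_t out[8])
{
    for (int o = 0; o < 8; o++)
        out[o] = 0;
    for (int pair = 3; pair >= 0; pair--) {          /* (D,H), (C,G), (B,F), (A,E) */
        for (int rep = 0; rep < 4; rep++) {          /* 4 passes of 8 bits per register */
            int srcs[2] = { pair + 4, pair };        /* second register of the pair first */
            for (int s = 0; s < 2; s++) {
                uint32_t *src = &in[srcs[s]];
                for (int o = 7; o >= 0; o--) {       /* H' down to A' */
                    uint32_t carry = *src >> 31;     /* "lsl": MSB falls into carry */
                    *src <<= 1;
                    out[o] = (out[o] << 1) | carry;  /* "rol": carry enters bit 0 */
                }
            }
        }
    }
}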
The fastest version I came up with:

#include <stdint.h>

// merges 32-bit a (low) and b (high) into a single 64-bit value
#define m(a, b) (((uint64_t) (a)) | (((uint64_t) (b)) << 32))

// gets the bit at position opos and moves it to position npos
#define s(a, opos, npos) (((opos) >= (npos)) \
    ? (((a) & (((uint64_t)1) << (opos))) >> ((opos) - (npos))) \
    : (((a) & (((uint64_t)1) << (opos))) << ((npos) - (opos))))

// gathers 8 different bits of a 64-bit number into one byte, at positions idx down to idx-7
#define b(a, idx) (s(a, 0, idx) | s(a, 32, (idx - 1)) | s(a, 8, (idx - 2)) | s(a, 40, (idx - 3)) | \
                   s(a, 16, (idx - 4)) | s(a, 48, (idx - 5)) | s(a, 24, (idx - 6)) | s(a, 56, (idx - 7)))

// takes 8 32-bit registers in in, outputs in out
void shuffle(const uint32_t* in, uint32_t* out) {
    uint64_t t[4] = { m(in[0], in[4]), m(in[1], in[5]), m(in[2], in[6]), m(in[3], in[7]) };
    for (int i = 0; i < 8; i++, t[0] >>= 1, t[1] >>= 1, t[2] >>= 1, t[3] >>= 1)
        out[i] = b(t[0], 31) | b(t[1], 23) | b(t[2], 15) | b(t[3], 7);
}
the only "optimization" in comparison to straight forward approach is to merge two 32 bit registers into single 64 bit, so we can reduce number of shifts in loop
On x86 with SSE: punpcklbw (_mm_unpacklo_epi8) can interleave the bytes of the source regs.
Use vector shifts then pmovmskb to grab the high bit of each byte, giving you results like
A0 E0 A8 E8 A16 E16 A24 E24
Then combine these byte results to get the 8 dest registers. This kind of sucks, because it requires a shift/pmovmskb for each result byte. There are 8 * 4 result bytes, so that's a lot of code.
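A sketch of that idea for one register pair with SSE2 intrinsics (mine, untested; packing the resulting 8-bit slices into the final output words is left out, and the first pmovmskb naturally yields the bit-7 slice):

#include <emmintrin.h>
#include <stdint.h>

static void slices_from_pair(uint32_t a, uint32_t e, uint8_t slices[8])
{
    __m128i va = _mm_cvtsi32_si128((int)a);
    __m128i ve = _mm_cvtsi32_si128((int)e);
    __m128i il = _mm_unpacklo_epi8(va, ve);   /* bytes: A0-7, E0-7, A8-15, E8-15, ... */
    for (int bit = 7; bit >= 0; bit--) {
        /* bit 7 of each byte currently holds bit 'bit' of that byte */
        slices[bit] = (uint8_t)_mm_movemask_epi8(il);   /* e.g. A7 E7 A15 E15 ... */
        il = _mm_slli_epi16(il, 1);           /* psllw: safe, we never shift by 8 */
    }
}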
I'm a bit late to the party but I'll post an answer anyway.
Firstly, notice that each byte of the output words uses only the even bits or only the odd bits of a pair of input words. Swapping the even bits of A with the odd bits of E gathers, in one register, all the bits needed for one byte of each of A', C', E', G' (and, in the other register, the bits for the corresponding byte of B', D', F', H'). Code for the resulting permutation can be found via the calculator linked above and simplifies to two bit-swapping operations per word. The resulting bytes can be written back to memory in their correct locations and read back if necessary.
Permuting the bytes within a word costs about as much as writing out the bytes to memory but is also possible.
Cost is 17 bit operations per word. A little less on ARM where rotations are free. Vectorisation is easy with byte shuffling replacing the last step.
The following vanilla C code should do it:
#include <stdio.h>
#include <stdint.h>
inline int32_t bit_permute_step(int32_t x, int32_t m, int shift) {
    int32_t t;
    t = ((x >> shift) ^ x) & m;
    x = (x ^ t) ^ (t << shift);
    return x;
}

void permute(int32_t input[8], int32_t output[8]) {
    int8_t *outputc = (int8_t*)output;
    for (int i = 0; i < 4; i++) {
        int32_t A = input[3-i];
        int32_t E = input[3-i+4];

        // swap the even bits of A/B/C/D with the odd bits of E/F/G/H
        int32_t t = (A ^ (E >> 1)) & 0x55555555;
        A ^= t;
        E ^= t << 1;

        A = bit_permute_step(A, 0x00cc00cc, 6);  // Bit index swap 1,3
        E = bit_permute_step(E, 0x00cc00cc, 6);  // Bit index swap 1,3
        A = bit_permute_step(A, 0x0000f0f0, 12); // Bit index swap 2,4
        E = bit_permute_step(E, 0x0000f0f0, 12); // Bit index swap 2,4

        outputc[i+0 ] = A >> 24;
        outputc[i+4 ] = E >> 24;
        outputc[i+8 ] = A >> 16;
        outputc[i+12] = E >> 16;
        outputc[i+16] = A >> 8;
        outputc[i+20] = E >> 8;
        outputc[i+24] = A;
        outputc[i+28] = E;
    }
}

void printBits(unsigned int num) {
    for (int bit = 31; bit >= 0; bit--) {
        printf("%i", (num >> bit) & 1);
        if (bit && !(bit & 7)) { printf(" "); }
    }
    printf("\n");
}

int main() {
    volatile int32_t input[8] =
        {0xf<<0, 0xf<<8, 0xf<<16, 0xf<<24, 0xf<<4, 0xf<<12, 0xf<<20, 0xf<<28};
    int32_t output[8] = {-1,-1,-1,-1,-1,-1,-1,-1};
    printf("input\n");
    permute((int32_t*)input, output);
    for (int i = 0; i < 8; i++) {
        printf(" %c:", 'A'+i);
        printBits(input[i]);
    }
    printf("output\n");
    for (int i = 0; i < 8; i++) {
        printf(" %c:", 'A'+i);
        printBits(output[i]);
    }
}
I need to extract all 10-bit words from a raw bitstream which is built as ABACABACABAC...
It already works with a naive C implementation like
for (uint8_t *ptr = in_packet; ptr < max; ptr += 5) {
    const uint64_t val =
        (((uint64_t)(*(ptr + 4))) << 32) |
        (((uint64_t)(*(ptr + 3))) << 24) |
        (((uint64_t)(*(ptr + 2))) << 16) |
        (((uint64_t)(*(ptr + 1))) <<  8) |
        (((uint64_t)(*(ptr + 0))) <<  0) ;
    *a_ptr++ = (val >> 0);
    *b_ptr++ = (val >> 10);
    *a_ptr++ = (val >> 20);
    *c_ptr++ = (val >> 30);
}
But performance is inadequate for my application so I would like to improve this using some AVX2 optimisations.
I visited the website https://software.intel.com/sites/landingpage/IntrinsicsGuide/# to find any functions that could help, but it seems there is nothing that works with 10-bit words, only 8- or 16-bit. That seems logical, since 10 bits is not native for a processor, but it makes things hard for me.
Is there any way to use AVX2 to solve this problem?
Your scalar loop does not compile efficiently. Compilers do it as 5 separate byte loads. You can express an unaligned 8-byte load in C++ with memcpy:
#include <stdint.h>
#include <string.h>
// do an 8-byte load that spans the 5 bytes we want
// clang auto-vectorizes using an AVX2 gather for 4 qwords. Looks pretty clunky but not terrible
void extract_10bit_fields_v2calar(const uint8_t *__restrict src,
uint16_t *__restrict a_ptr, uint16_t *__restrict b_ptr, uint16_t *__restrict c_ptr,
const uint8_t *max)
{
    for (const uint8_t *ptr = src; ptr < max; ptr += 5) {
        uint64_t val;
        memcpy(&val, ptr, sizeof(val));
        const unsigned mask = (1U<<10) - 1; // unused in original source!?!
        *a_ptr++ = (val >> 0) & mask;
        *b_ptr++ = (val >> 10) & mask;
        *a_ptr++ = (val >> 20) & mask;
        *c_ptr++ = (val >> 30) & mask;
    }
}
ICC and clang auto-vectorize your 1-byte version, but do a very bad job (lots of insert/extract of single bytes). Here's your original and this function on Godbolt (with gcc and clang -O3 -march=skylake)
None of those 3 compilers are really close to what we can do manually.
Manual vectorization
My current AVX2 version of this answer overlooks a detail: there are only 3 kinds of fields, ABAC, not ABCD like 10-bit RGBA pixels would have. So I have a version of this which unpacks to 4 separate output streams (which I'll leave in because of the packed-RGBA use-case, in case I ever add a dedicated version for the ABAC interleave).
The existing version can use vpunpcklwd to interleave the two A parts instead of storing them with separate vmovq; that should work for your case. There might be something more efficient, IDK.
BTW, I find it easier to remember and type instruction mnemonics, not intrinsic names. Intel's online intrinsics guide is searchable by instruction mnemonic.
Observations about your layout:
Each field spans one byte boundary, never two, so it's possible to assemble any 4 pairs of bytes in a qword that hold 4 complete fields.
Or with a byte shuffle, to create 2-byte words that each have a whole field at some offset. (e.g. for AVX512BW vpsrlvw, or for AVX2 2x vpsrld + word-blend.) A word shuffle like AVX512 vpermw would not be sufficient: some individual bytes need to be duplicated with the start of one field and end of another. I.e the source positions aren't all aligned words, especially when you have 2x 5 bytes inside the same 16-byte "lane" of a vector.
00-07|08-15|16-23|24-31|32-39 byte boundaries (8-bit)
00...09|10...19|20...29|30...39  field boundaries (10-bit)
Luckily 8 and 10 have a GCD of 2 which is >= 10-8=2. 8*5 = 4*10 so we don't get all possible start positions, e.g. never a field starting at the last bit of 1 byte, spanning another byte, and including the first bit of a 3rd byte.
Possible AVX2 strategy: unaligned 32-byte load that leave 2x 5 bytes at the top of the low lane, and 2x 5 bytes at the bottom of the high lane. Then vpshufb in-lane shuffle to set up for 2x vpsrlvd variable-count shifts, and a blend.
Quick summary of a new idea I haven't expanded yet.
Given an input of xxx a0B0A0C0 a1B1A1C1 | a2B2A2C2 a3B3A3C3 from our unaligned load, we can get a result of
a0 A0 a1 A1 B0 B1 C0 C1 | a2 A2 a3 A3 B2 B3 C2 C3 with the right choice of vpshufb control.
Then a vpermd can put all of those 32-bit groups into the right order, with all the A elements in the high half (ready for a vextracti128 to memory), and the B and C in the low half (ready for vmovq / vmovhps stores).
Use different vpermd shuffles for adjacent pairs so we can vpblendd to merge them for 128-bit B and C stores.
Old version, probably worse than unaligned load + vpshufb.
With AVX2, one option is to broadcast the containing 64-bit element to all positions in a vector and then use variable-count right shifts to get the bits to the bottom of a dword element.
You probably want to do a separate 64-bit broadcast-load for each group (thus partially overlapping with the previous), instead of trying to pick apart a __m256i of contiguous bits. (Broadcast-loads are cheap, shuffling is expensive.)
After _mm256_srlv_epi64, then AND to isolate the low 10 bits in each qword.
Repeat that 4 times for 4 vectors of input, then use _mm256_packus_epi32 to do in-lane packing down to 32-bit then 16-bit elements.
That's the simple version. Optimizations of the interleaving are possible, e.g. by using left or right shifts to set up for vpblendd instead of a 2-input shuffle like vpackusdw or vshufps. _mm256_blend_epi32 is very efficient on existing CPUs, running on any port.
This also allows delaying the AND until after the first packing step because we don't need to avoid saturation from high garbage.
Design notes:
shown as 32-bit chunks after variable-count shifts
[0 d0 0 c0 | 0 b0 0 a0] # after an AND mask
[0 d1 0 c1 | 0 b1 0 a1]
[0 d1 0 c1 0 d0 0 c0 | 0 b1 0 a1 0 b0 0 a0] # vpackusdw
shown as 16-bit elements but actually the same as what vshufps can do
---------
[X d0 X c0 | X b0 X a0] even the top element is only garbage right shifted by 30, not quite zero
[X d1 X c1 | X b1 X a1]
[d1 c1 d0 c0 | b1 a1 b0 a0 ] vshufps (can't do d1 d0 c1 c0 unfortunately)
---------
[X d0 X c0 | X b0 X a0] variable-count >> qword
[d1 X c1 X | b1 X a1 0] variable-count << qword
[d1 d0 c1 c0 | b1 b0 a1 a0] vpblendd
This last trick extends to vpblendw, allowing us to do everything with interleaving blends, no shuffle instructions at all, resulting in the outputs we want contiguous and in the right order in qwords of a __m256i.
x86 SIMD variable-count shifts can only be left or right for all elements, so we need to make sure that all the data is either left or right of the desired position, not some of each within the same vector. We could use an immediate-count shift to set up for this, but even better is to just adjust the byte-address we load from. For loads after the first, we know it's safe to load some of the bytes before the first bitfield we want (without touching an unmapped page).
# as 16-bit elements
[X X X d0 X X X c0 | ...] variable-count >> qword
[X X d1 X X X c1 X | ...] variable-count >> qword from an offset load that started with the 5 bytes we want all to the left of these positions
[X d2 X X X c2 X X | ...] variable-count << qword
[d3 X X X c3 X X X | ...] variable-count << qword
[X d2 X d0 X c2 X c0 | ...] vpblendd
[d3 X d1 X c3 X c1 X | ...] vpblendd
[d3 d2 d1 d0 c3 c2 c1 c0 | ...] vpblendw (Same behaviour in both high and low lane)
Then mask off the high garbage inside each 16-bit word
Note: this does 4 separate outputs, like ABCD or RGBA->planar, not ABAC.
// potentially unaligned 64-bit broadcast-load, hopefully vpbroadcastq. (clang: yes, gcc: no)
// defeats gcc/clang folding it into an AVX512 broadcast memory source
// but vpsllvq's ymm/mem operand is the shift count, not data
static inline
__m256i bcast_load64(const uint8_t *p) {
// hopefully safe with strict-aliasing since the deref is inside an intrinsic?
__m256i bcast = _mm256_castpd_si256( _mm256_broadcast_sd( (const double*)p ) );
return bcast;
}
// UNTESTED
// unpack 10-bit fields from 4x 40-bit chunks into 16-bit dst arrays
// overreads past the end of the last chunk by 1 byte
// for ABCD repeating, not ABAC, e.g. packed 10-bit RGBA
void extract_10bit_fields_4output(const uint8_t *__restrict src,
uint16_t *__restrict da, uint16_t *__restrict db, uint16_t *__restrict dc, uint16_t *__restrict dd,
const uint8_t *max)
{
// FIXME: cleanup loop for non-whole-vectors at the end
while( src<max ){
__m256i bcast = bcast_load64(src); // data we want is from bits [0 to 39], last starting at 30
__m256i ext0 = _mm256_srlv_epi64(bcast, _mm256_set_epi64x(30, 20, 10, 0)); // place at bottom of each qword
bcast = bcast_load64(src+5-2); // data we want is from bits [16 to 55], last starting at 30+16 = 46
__m256i ext1 = _mm256_srlv_epi64(bcast, _mm256_set_epi64x(30, 20, 10, 0)); // place it at bit 16 in each qword element
bcast = bcast_load64(src+10); // data we want is from bits [0 to 39]
__m256i ext2 = _mm256_sllv_epi64(bcast, _mm256_set_epi64x(2, 12, 22, 32)); // place it at bit 32 in each qword element
bcast = bcast_load64(src+15-2); // data we want is from bits [16 to 55], last field starting at 46
__m256i ext3 = _mm256_sllv_epi64(bcast, _mm256_set_epi64x(2, 12, 22, 32)); // place it at bit 48 in each qword element
__m256i blend20 = _mm256_blend_epi32(ext0, ext2, 0b10101010); // X d2 X d0 X c2 X c0 | X b2 ...
__m256i blend31 = _mm256_blend_epi32(ext1, ext3, 0b10101010); // d3 X d1 X c3 X c1 X | b3 X ...
__m256i blend3210 = _mm256_blend_epi16(blend20, blend31, 0b10101010); // d3 d2 d1 d0 c3 c2 c1 c0
__m256i res = _mm256_and_si256(blend3210, _mm256_set1_epi16((1U<<10) - 1) );
__m128i lo = _mm256_castsi256_si128(res);
__m128i hi = _mm256_extracti128_si256(res, 1);
_mm_storel_epi64((__m128i*)da, lo); // movq store of the lowest 64 bits
_mm_storeh_pi((__m64*)db, _mm_castsi128_ps(lo)); // movhps store of the high half of the low 128. Efficient: no shuffle uop needed on Intel CPUs
_mm_storel_epi64((__m128i*)dc, hi);
_mm_storeh_pi((__m64*)dd, _mm_castsi128_ps(hi)); // clang pessimizes this to vpextrq :(
da += 4;
db += 4;
dc += 4;
dd += 4;
src += 4*5;
}
}
This compiles (Godbolt) to about 21 front-end uops (on Skylake) in the loop per 4 groups of 4 fields. (Including a useless register copy for _mm256_castsi256_si128 instead of just using the low half of ymm0 = xmm0.) This will be very good on Skylake. There's a good balance of uops for different ports, and variable-count shift is 1 uop for either p0 or p1 on SKL (vs. more expensive previously). The bottleneck might be just the front-end limit of 4 fused-domain uops per clock.
Replays of cache-line-split loads will happen because the unaligned loads will sometimes cross a 64-byte cache-line boundary. But that's just in the back-end, and we have a few spare cycles on ports 2 and 3 because of the front-end bottleneck (4 loads and 4 stores per set of results, with indexed stores which thus can't use port 7). If dependent ALU uops have to get replayed as well, we might start seeing back-end bottlenecks.
Despite the indexed addressing modes, there won't be unlamination because Haswell and later can keep indexed stores micro-fused, and the broadcast loads are a single pure uop anyway, not micro-fused ALU+load.
On Skylake, it can maybe come close to 4x 40-bit groups per 5 clock cycles, if memory bandwidth isn't a bottleneck. (e.g. with good cache blocking.) Once you factor in overhead and cost of cache-line-split loads causing occasional stalls, maybe 1.5 cycles per 40 bits of input, i.e. 6 cycles per 20 bytes of input on Skylake.
On other CPUs (Haswell and Ryzen), the variable-count shifts will be a bottleneck, but you can't really do anything about that. I don't think there's anything better. On HSW it's 3 uops: p5 + 2p0. On Ryzen it's only 1 uop, but it only has 1 per 2 clock throughput (for the 128-bit version), or per 4 clocks for the 256-bit version which costs 2 uops.
Beware that clang pessimizes the _mm_storeh_pi store to vpextrq [mem], xmm, 1: 2 uops, shuffle + store (instead of vmovhps: a pure store on Intel, no ALU). GCC compiles it as written.
I used _mm256_broadcast_sd even though I really want vpbroadcastq, just because there's an intrinsic that takes a pointer operand instead of __m256i (because with AVX1, only the memory-source version existed; with AVX2, register-source versions of all the broadcast instructions exist). To use _mm256_set1_epi64x, I'd have to write pure C that didn't violate strict aliasing (e.g. with memcpy) to do an unaligned uint64_t load. I don't think it will hurt performance to use an FP broadcast load on current CPUs, though.
I'm hoping _mm256_broadcast_sd allows its source operand to alias anything without C++ strict-aliasing undefined behaviour, the same way _mm256_loadu_ps does. Either way it will work in practice if it doesn't inline into a function that stores into *src, and maybe even then. So maybe a memcpy unaligned load would have made more sense!
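For example, a memcpy-based broadcast load might look like this (my sketch; whether it folds into a single vpbroadcastq is up to the compiler):

#include <immintrin.h>
#include <stdint.h>
#include <string.h>

static inline __m256i bcast_load64_memcpy(const uint8_t *p)
{
    uint64_t q;
    memcpy(&q, p, sizeof(q));              /* well-defined unaligned load */
    return _mm256_set1_epi64x((long long)q);
}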
I've had bad results in the past with getting compilers to emit pmovzxwd xmm0, [mem] from code like _mm_cvtepu16_epi32( _mm_loadu_si64(ptr) ); you often get an actual movq load + reg-reg pmovzx. That's why I didn't try that _mm256_broadcastq_epi64(__m128i).
Old idea; if we already need a byte shuffle we might as well use plain word shifts instead of vpmultishift.
With AVX512VBMI (IceLake, CannonLake), you might want vpmultishiftqb. Instead of broadcasting / shifting one group at a time, we can do all the work for a whole vector of groups after putting the right bytes in the right places first.
You'd still need/want a version for CPUs with some AVX512 but not AVX512VBMI (e.g. Skylake-avx512). Probably vpermd + vpshufb can get the bytes we need into the 128-bit lanes we want.
I don't think we can get away with using only dword-granularity shifts to allow merge-masking instead of dword blend after qword shift. We might be able to merge-mask a vpblendw though, saving a vpblendd
IceLake has 1/clock vpermw and vpermb, single-uop. (It has a 2nd shuffle unit on another port that handles some shuffle uops). So we can load a full vector that contains 4 or 8 groups of 4 elements and shuffle every byte into place efficiently. I think every CPU that has vpermb has it single-uop. (But that's only Ice Lake and the limited-release Cannon Lake).
vpermt2w (to combine 16-bit elements from 2 vectors into any order) is one per 2 clock throughput (InstLatx64 for IceLake-Y), so unfortunately it's not as efficient as the one-vector shuffles.
Anyway, you might use it like this:
64-byte / 512-bit load (includes some over-read at the end from 8x 8-byte groups instead of 8x 5-byte groups. Optionally use a zero-masked load to make this safe near the end of an array thanks to fault suppression)
vpermb to put the 2 bytes containing each field into desired final destination position.
vpsrlvw + vpandq to extract each 10-bit field into a 16-bit word
That's about 4 uops, not including the stores.
You probably want the high half containing the A elements for a contiguous vextracti64x4 and the low half containing the B and C elements for vmovdqu and vextracti128 stores.
Or for 2x vpblenddd to set up for 256-bit stores. (Use 2 different vpermb vectors to create 2 different layouts.)
You shouldn't need vpermt2w or vpermt2d to combine adjacent vectors for wider stores.
Without AVX512VBMI, probably a vpermd + vpshufb can get all the necessary bytes into each 128-bit chunk instead of vpermb. The rest of it only requires AVX512BW which Skylake-X has.
I have an array of uint64_t[4], and I need to generate a mask,
such that the array, if it were a 256-bit integer, equals
(1 << w) - 1, where w goes from 1 to 256.
The best thing I have come up with is branchless, but it takes MANY instructions. It is in Zig because Clang doesn't seem to expose llvm's saturating subtraction. http://localhost:10240/z/g8h1rV
Is there a better way to do this?
var mask: [4]u64 = undefined;
for (mask) |_, i|
mask[i] = 0xffffffffffffffff;
mask[3] ^= ((u64(1) << @intCast(u6, (inner % 64) + 1)) - 1) << @intCast(u6, 64 - (inner % 64));
mask[2] ^= ((u64(1) << @intCast(u6, (@satSub(u32, inner, 64) % 64) + 1)) - 1) << @intCast(u6, 64 - (inner % 64));
mask[1] ^= ((u64(1) << @intCast(u6, (@satSub(u32, inner, 128) % 64) + 1)) - 1) << @intCast(u6, 64 - (inner % 64));
mask[0] ^= ((u64(1) << @intCast(u6, (@satSub(u32, inner, 192) % 64) + 1)) - 1) << @intCast(u6, 64 - (inner % 64));
Are you targeting x86-64 with AVX2 for 256-bit vectors? I thought that was an interesting case to answer for.
If so, you can do this in a few instructions using saturating subtraction and a variable count shift.
x86 SIMD shifts like vpsrlvq saturate the shift count, shifting all the bits out when the count is >= element width, unlike scalar integer shifts, where the shift count is masked (and thus wraps around).
For the lowest u64 element, starting with all-ones, we need to leave it unmodified for bitpos >= 64; for smaller bit positions, right-shift it by 64-bitpos. Unsigned saturating subtraction looks like the way to go here, as you observed, to create a shift count of 0 for larger bitpos. But x86 only has SIMD saturating subtraction, and only for byte or word elements. If we don't care about bitpos > 256, though, that's fine: we can use 16-bit elements at the bottom of each u64 and let a 0 - 0 happen in the rest of the u64.
Your code looks pretty overcomplicated, creating (1<<n) - 1 and XORing. I think it's a lot easier to just use a variable-count shift on the 0xFFFF...FF elements directly.
I don't know Zig, so do whatever you have to to get it to emit asm like this. Hopefully this is useful because you tagged this assembly; should be easy to translate to intrinsics for C, or Zig if it has them.
default rel
section .rodata
shift_offsets: dw 64, 128, 192, 256 ; 16-bit elements, to be loaded with zero-extension to 64
section .text
pos_to_mask256:
    vpmovzxwq ymm2, [shift_offsets]    ; ymm2 = _mm256_set_epi64x(256, 192, 128, 64)
    vpcmpeqd  ymm1, ymm1, ymm1         ; ymm1 = all-ones
    ; set up vector constants, can be hoisted

    vmovd        xmm0, edi
    vpbroadcastq ymm0, xmm0            ; ymm0 = _mm256_set1_epi64x(bitpos)
    vpsubusw     ymm0, ymm2, ymm0      ; ymm0 = {256,192,128,64}-bitpos with unsigned saturation
    vpsrlvq      ymm0, ymm1, ymm0      ; mask[i] >>= count, where counts >= 64 create 0s.
    ret
If the input integer starts in memory, you can of course efficiently broadcast-load it into a ymm register directly.
The shift-offsets vector can of course be hoisted out of a loop, as can the all-ones.
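For reference, one possible C intrinsics translation of the asm above (my sketch, untested):

#include <immintrin.h>

__m256i pos_to_mask256(unsigned bitpos)
{
    const __m256i offsets  = _mm256_set_epi64x(256, 192, 128, 64);  /* element 0 = 64 */
    const __m256i all_ones = _mm256_set1_epi32(-1);
    __m256i vpos   = _mm256_set1_epi64x((long long)bitpos);
    /* {64,128,192,256} - bitpos with unsigned saturation; only the low 16 bits
       of each qword are nonzero, the rest are 0 - 0 = 0. */
    __m256i counts = _mm256_subs_epu16(offsets, vpos);
    return _mm256_srlv_epi64(all_ones, counts);   /* counts >= 64 leave all-zero */
}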
With input = 77, the high 2 elements are zeroed by shifts of 256-77=179, and 192-77=115 bits. Tested with NASM + GDB for EDI=77, and the result is
(gdb) p /x $ymm0.v4_int64
{0xffffffffffffffff, 0x1fff, 0x0, 0x0}
GDB prints low element first, opposite of Intel notation / diagrams. This vector is actually 0, 0, 0x1fff, 0xffffffffffffffff, i.e. 64+13 = 77 one bits, and the rest all zeros. Other test cases
edi=0: mask = all-zero
edi=1: mask = 1
... : mask = edi one bits at the bottom, then zeros
edi=255: mask = all ones except for the top bit of the top element
edi=256: mask = all ones
edi>256: mask = all ones. (unsigned subtraction saturates to 0 everywhere.)
You need AVX2 for the variable-count shifts. psubusb/w is SSE2, so you could consider doing that part with SIMD and then go back to scalar integer for the shifts, or maybe just use SSE2 shifts for one element at a time. Like psrlq xmm1, xmm0 which takes the low 64 bits of xmm0 as the shift count for all elements of xmm1.
Most ISAs don't have saturating scalar subtraction. Some ARM CPUs do for scalar integer, I think, but x86 doesn't. IDK what you're using.
On x86 (and many other ISAs) you have 2 problems:
keep all-ones for low elements (either modify the shift result, or saturate shift count to 0)
produce 0 for high elements above the one containing the top bit of the mask. x86 scalar shifts can't do this at all, so you might feed the shift an input of 0 for that case. Maybe using cmov to create it based on flags set by sub for 192-w or something.
count = 192-w;
shift_input = count<0 ? 0 : ~0ULL;
shift_input >>= count & 63; // mask to avoid UB in C. Optimizes away on x86 where shr does this anyway.
Hmm, this doesn't handle saturating the subtraction to 0 to keep the all-ones, though.
If tuning for ISAs other than x86, maybe look at some other options. Or maybe there's something better on x86 as well. Creating the all-ones or all-zeros with sar reg,63 is an interesting option (broadcast the sign bit), but we actually need all-ones when 192-count has sign bit = 0.
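Putting those pieces together in portable C, here is a hedged branchless-ish sketch (mine, untested; whether the compiler actually uses cmov for the ternaries is up to it):

#include <stdint.h>

/* mask[0] is the lowest 64 bits of the 256-bit result, w is the bit position */
static void pos_to_mask256_scalar(uint64_t mask[4], unsigned w)
{
    for (int i = 0; i < 4; i++) {
        int count = 64 * (i + 1) - (int)w;        /* ideal right-shift count  */
        uint64_t in = (count >= 64) ? 0 : ~0ULL;  /* element entirely above w */
        int clamped = (count < 0) ? 0 : count;    /* element entirely below w */
        mask[i] = in >> (clamped & 63);           /* & 63 avoids UB; the >=64 case is already 0 */
    }
}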
Here's some Zig code that compiles and runs:
const std = @import("std");

noinline fn thing(x: u256) bool {
    return x > 0xffffffffffffffff;
}

pub fn main() anyerror!void {
    var num: u256 = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff;
    while (thing(num)) {
        num /= 2;
        std.debug.print(".", .{});
    }
    std.debug.print("done\n", .{});
}
Zig master generates relatively clean x86 assembler from that.
So I have to find the set bits (bits that are 1) of an unsigned char variable in C?
A similar question is How to count the number of set bits in a 32-bit integer?, but it uses an algorithm that's not easily adaptable to 8-bit unsigned chars (or it's not apparent how).
The algorithm suggested in the question How to count the number of set bits in a 32-bit integer? is trivially adapted to 8 bit:
int NumberOfSetBits( uint8_t b )
{
b = b - ((b >> 1) & 0x55);
b = (b & 0x33) + ((b >> 2) & 0x33);
return (((b + (b >> 4)) & 0x0F) * 0x01);
}
It is simply a case of shortening the constants to the least significant eight bits and removing the final 24-bit right-shift. Equally, it could be adapted for 16 bits using an 8-bit shift. Note that in the 8-bit case, the mechanical adaptation of the 32-bit algorithm results in a redundant * 0x01, which could be omitted.
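For example, a possible 16-bit adaptation along those lines (my sketch, untested) widens the masks and scales the final multiply-and-shift accordingly:

#include <stdint.h>

int NumberOfSetBits16( uint16_t b )
{
    b = b - ((b >> 1) & 0x5555);
    b = (b & 0x3333) + ((b >> 2) & 0x3333);
    b = (b + (b >> 4)) & 0x0F0F;              /* per-byte counts, each <= 8 */
    return ((uint16_t)(b * 0x0101)) >> 8;     /* sum the two bytes with an 8-bit shift */
}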
The fastest approach for an 8-bit variable is using a lookup table.
Build an array of 256 values, one per 8-bit combination. Each value should contain the count of bits in its corresponding index:
int bit_count[] = {
// 00 01 02 03 04 05 06 07 08 09 0a, ... FE FF
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, ..., 7, 8
};
Getting a count of a combination is the same as looking up a value from the bit_count array. The advantage of this approach is that it is very fast.
You can generate the array using a simple program that counts bits one by one in a slow way:
for (int i = 0; i != 256; i++) {
    int count = 0;
    for (int p = 0; p != 8; p++) {
        if (i & (1 << p)) {
            count++;
        }
    }
    printf("%d, ", count);
}
(demo that generates the table).
If you would like to trade some CPU cycles for memory, you can use a 16-byte lookup table for two 4-bit lookups:
static const char split_lookup[] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};
int bit_count(unsigned char n) {
return split_lookup[n&0xF] + split_lookup[n>>4];
}
Demo.
I think you are looking for the Hamming weight algorithm for 8 bits?
If so, here is the code:
unsigned char in = 22; //This is your input number
unsigned char out = 0;
in = in - ((in>>1) & 0x55);
in = (in & 0x33) + ((in>>2) & 0x33);
out = ((in + (in>>4) & 0x0F) * 0x01) ;
Counting the number of digits different from 0 is also known as computing the Hamming weight. In this case, you are counting the number of 1's.
Dasblinkenlight provided you with a table-driven implementation, and Olaf provided you with a software-based solution. I think you have two other potential solutions. The first is to use a compiler extension, the second is to use an ASM-specific instruction with inline assembly from C.
For the first alternative, see GCC's __builtin_popcount(). (Thanks to Artless Noise).
For the second alternative, you did not specify the embedded processor, but I'm going to offer this in case it's ARM-based.
Some ARM processors have the VCNT instruction, which performs the count for you. So you could do it from C with inline assembly:
inline
unsigned int hamming_weight(unsigned char value) {
    /* 32-bit ARM with NEON */
    unsigned int result;
    __asm__ (
        "vdup.8  d0, %1      \n\t"   /* replicate the byte into a NEON D register */
        "vcnt.8  d0, d0      \n\t"   /* per-byte popcount */
        "vmov.u8 %0, d0[0]   \n\t"   /* extract lane 0 back to a core register */
        : "=r" (result)
        : "r" (value)
        : "d0"
    );
    return result;
}
Also see Fastest way to count number of 1s in a register, ARM assembly.
For completeness, here is Kernighan's bit counting algorithm:
int count_bits(int n) {
    int count = 0;
    while (n != 0) {
        n &= (n - 1);
        count++;
    }
    return count;
}
Also see Please explain the logic behind Kernighan's bit counting algorithm.
I made an optimized version. With a 32-bit processor, utilizing multiplication, bit shifting and masking can make smaller code for the same task, especially when the input domain is small (8-bit unsigned integer).
The following two code snippets are equivalent:
unsigned int bit_count_uint8(uint8_t x)
{
uint32_t n;
n = (uint32_t)(x * 0x08040201UL);
n = (uint32_t)(((n >> 3) & 0x11111111UL) * 0x11111111UL);
/* The "& 0x0F" will be optimized out but I add it for clarity. */
return (n >> 28) & 0x0F;
}
/*
unsigned int bit_count_uint8_traditional(uint8_t x)
{
x = x - ((x >> 1) & 0x55);
x = (x & 0x33) + ((x >> 2) & 0x33);
x = ((x + (x >> 4)) & 0x0F);
return x;
}
*/
This produces smallest binary code for IA-32, x86-64 and AArch32 (without NEON instruction set) as far as I can find.
For x86-64, this doesn't use the fewest number of instructions, but the bit shifts and downcasting avoid the use of 64-bit instructions and therefore save a few bytes in the compiled binary.
Interestingly, in IA-32 and x86-64, a variant of the above algorithm using a modulo ((((uint32_t)(x * 0x08040201U) >> 3) & 0x11111111U) % 0x0F) actually generates larger code, due to a requirement to move the remainder register for return value (mov eax,edx) after the div instruction. (I tested all of these in Compiler Explorer)
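For reference, that modulo variant written out as a function (a direct transcription of the expression above):

#include <stdint.h>

unsigned int bit_count_uint8_mod(uint8_t x)
{
    /* 16 == 1 (mod 15), so "% 0x0F" sums the eight 0/1 nibbles produced above. */
    return (((uint32_t)(x * 0x08040201UL) >> 3) & 0x11111111UL) % 0x0F;
}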
Explanation
I denote the eight bits of the byte x, from MSB to LSB, as a, b, c, d, e, f, g and h.
abcdefgh
* 00001000 00000100 00000010 00000001 (make 4 copies of x
--------------------------------------- with appropriate
abc defgh0ab cdefgh0a bcdefgh0 abcdefgh bit spacing)
>> 3
---------------------------------------
000defgh 0abcdefg h0abcdef gh0abcde
& 00010001 00010001 00010001 00010001
---------------------------------------
000d000h 000c000g 000b000f 000a000e
* 00010001 00010001 00010001 00010001
---------------------------------------
000d000h 000c000g 000b000f 000a000e
... 000h000c 000g000b 000f000a 000e
... 000c000g 000b000f 000a000e
... 000g000b 000f000a 000e
... 000b000f 000a000e
... 000f000a 000e
... 000a000e
... 000e
^^^^ (Bits 31-28 will contain the sum of the bits
a, b, c, d, e, f, g and h. Extract these
bits and we are done.)
Maybe not the fastest, but straightforward:
int count = 0;
for (int i = 0; i < 8; ++i) {
    unsigned char c = 1 << i;
    if (yourVar & c) {
        // bit n°i is set
        // first bit is bit n°0
        count++;
    }
}
For 8/16 bit MCUs, a loop will very likely be faster than the parallel-addition approach, as these MCUs cannot shift by more than one bit per instruction, so:
size_t popcount(uint8_t val)
{
size_t cnt = 0;
do {
cnt += val & 1U; // or: if ( val & 1 ) cnt++;
} while ( val >>= 1 ) ;
return cnt;
}
For the incrementation of cnt, you might want to profile the two variants shown. If that is still too slow, an assembler implementation might be worth a try, using the carry flag (if available). While I am against assembler optimizations in general, such algorithms are one of the few good exceptions (and even then, only once the C version proves too slow).
If you can spare the Flash, a lookup table as proposed by @dasblinkenlight is likely the fastest approach.
Just a hint: for some architectures (notably ARM and x86/64), gcc has a builtin, __builtin_popcount(), which you might also want to try if available (although it takes int at least). This might use a single CPU instruction; you cannot get faster or more compact.
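For reference, a minimal usage sketch (assuming GCC or Clang; the wrapper name is mine):

#include <stdint.h>

static inline unsigned int popcount8(uint8_t v)
{
    /* v is zero-extended to unsigned int before the builtin sees it */
    return (unsigned int)__builtin_popcount(v);
}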
Allow me to post a second answer. This one is the smallest possible for ARM processors with Advanced SIMD extension (NEON). It's even smaller than __builtin_popcount() (since __builtin_popcount() is optimized for unsigned int input, not uint8_t).
#ifdef __ARM_NEON
/* ARM C Language Extensions (ACLE) recommends us to check __ARM_NEON before
including <arm_neon.h> */
#include <arm_neon.h>
unsigned int bit_count_uint8(uint8_t x)
{
/* Set all lanes at once so that the compiler won't emit instruction to
zero-initialize other lanes. */
uint8x8_t v = vdup_n_u8(x);
/* Count the number of set bits for each lane (8-bit) in the vector. */
v = vcnt_u8(v);
/* Get lane 0 and discard other lanes. */
return vget_lane_u8(v, 0);
}
#endif
I decided to continue Fast corners optimisation and stucked at
_mm_movemask_epi8 SSE instruction. How can i rewrite it for ARM Neon with uint8x16_t input?
I know this post is quite outdated but I found it useful to give my (validated) solution. It assumes all ones/all zeroes in every lane of the Input argument.
const uint8_t __attribute__ ((aligned (16))) _Powers[16]=
{ 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
// Set the powers of 2 (do it once for all, if applicable)
uint8x16_t Powers= vld1q_u8(_Powers);
// Compute the mask from the input
uint64x2_t Mask= vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8(Input, Powers))));
// Get the resulting bytes
uint16_t Output;
vst1q_lane_u8((uint8_t*)&Output + 0, (uint8x16_t)Mask, 0);
vst1q_lane_u8((uint8_t*)&Output + 1, (uint8x16_t)Mask, 8);
(Mind http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47553, anyway.)
Similarly to Michael, the trick is to form the powers of the indexes of the non-null entries, and to sum them pairwise three times. This must be done with increasing data size to double the stride on every addition. You reduce from 2 x 8 8-bit entries to 2 x 4 16-bit, then 2 x 2 32-bit and 2 x 1 64-bit. The low byte of these two numbers gives the solution. I don't think there is an easy way to pack them together to form a single short value using NEON.
Takes 6 NEON instructions if the input is in the suitable form and the powers can be preloaded.
The obvious solution seems to be completely missed here.
// Use shifts to collect all of the sign bits.
// I'm not sure if this works on big endian, but big endian NEON is very
// rare.
int vmovmaskq_u8(uint8x16_t input)
{
// Example input (half scale):
// 0x89 FF 1D C0 00 10 99 33
// Shift out everything but the sign bits
// 0x01 01 00 01 00 00 01 00
uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
// Merge the even lanes together with vsra. The '??' bytes are garbage.
// vsri could also be used, but it is slightly slower on aarch64.
// 0x??03 ??02 ??00 ??01
uint32x4_t paired16 = vreinterpretq_u32_u16(
vsraq_n_u16(high_bits, high_bits, 7));
// Repeat with wider lanes.
// 0x??????0B ??????04
uint64x2_t paired32 = vreinterpretq_u64_u32(
vsraq_n_u32(paired16, paired16, 14));
// 0x??????????????4B
uint8x16_t paired64 = vreinterpretq_u8_u64(
vsraq_n_u64(paired32, paired32, 28));
// Extract the low 8 bits from each lane and join.
// 0x4B
return vgetq_lane_u8(paired64, 0) | ((int)vgetq_lane_u8(paired64, 8) << 8);
}
This question deserves a newer answer for aarch64. The addition of new capabilities to Armv8 allows the same function to be implemented in fewer instructions. Here's my version:
uint32_t _mm_movemask_aarch64(uint8x16_t input)
{
    const int8_t __attribute__ ((aligned (16))) ucShift[] = {-7,-6,-5,-4,-3,-2,-1,0,-7,-6,-5,-4,-3,-2,-1,0};
    int8x16_t vshift = vld1q_s8(ucShift);      /* shift counts must be a signed vector */
    uint8x16_t vmask = vandq_u8(input, vdupq_n_u8(0x80));
    uint32_t out;

    vmask = vshlq_u8(vmask, vshift);
    out = vaddv_u8(vget_low_u8(vmask));
    out += (vaddv_u8(vget_high_u8(vmask)) << 8);

    return out;
}
After some tests, it looks like the following code works correctly:
int32_t _mm_movemask_epi8_neon(uint8x16_t input)
{
const int8_t __attribute__ ((aligned (16))) xr[8] = {-7,-6,-5,-4,-3,-2,-1,0};
uint8x8_t mask_and = vdup_n_u8(0x80);
int8x8_t mask_shift = vld1_s8(xr);
uint8x8_t lo = vget_low_u8(input);
uint8x8_t hi = vget_high_u8(input);
lo = vand_u8(lo, mask_and);
lo = vshl_u8(lo, mask_shift);
hi = vand_u8(hi, mask_and);
hi = vshl_u8(hi, mask_shift);
lo = vpadd_u8(lo,lo);
lo = vpadd_u8(lo,lo);
lo = vpadd_u8(lo,lo);
hi = vpadd_u8(hi,hi);
hi = vpadd_u8(hi,hi);
hi = vpadd_u8(hi,hi);
return ((hi[0] << 8) | (lo[0] & 0xFF));
}
Note that I haven't tested any of this, but something like this might work:
X := the vector that you want to create the mask from
A := 0x808080808080...
B := 0x00FFFEFDFCFB... (i.e. 0,-1,-2,-3,...)
X = vand_u8(X, A); // Keep d7 of each byte in X
X = vshl_u8(X, B); // X[7]>>=0; X[6]>>=1; X[5]>>=2; ...
// Each byte of X now contains its msb shifted 7-N bits to the right, where N
// is the byte index.
// Do 3 pairwise adds in order to pack all these into X[0]
X = vpadd_u8(X, X);
X = vpadd_u8(X, X);
X = vpadd_u8(X, X);
// X[0] should now contain the mask. Clear the remaining bytes if necessary
This would need to be repeated once to process a 128-bit vector, since vpadd only works on 64-bit vectors.
I know this question has been here for 8 years already, but let me give you an answer which might solve all the performance problems with emulation. It's based on the blog post Bit twiddling with Arm Neon: beating SSE movemasks, counting bits and more.
Most uses of movemask come from comparisons, where every byte of the result vector is either 0xFF or 0x00. The mask is then typically used to check whether none/all of the bytes match, to find the leading/trailing match, or to iterate over the matching bits.
If that is the case, which it often is, then you can use the shrn reg1, reg2, #4 instruction. This Shift-Right-then-Narrow instruction reduces the 128-bit byte mask to a 64-bit nibble mask (with the low and high nibbles alternating in the result). That allows the mask to be extracted into a 64-bit general-purpose register.
const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
const uint8x8_t res = vshrn_n_u16(equalMask, 4);
const uint64_t matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
return matches;
After that you can use all bit operations you typically use on x86 with very minor tweaks like shifting by 2 or doing a scalar AND.
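For example, a small sketch of my own (untested, assuming little-endian lane order and the GCC/Clang __builtin_ctzll), finding the index of the first matching byte from the nibble mask:

#include <arm_neon.h>
#include <stdint.h>

static int first_match_index(uint8x16_t chunk, uint8_t tag)
{
    uint16x8_t eq = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(eq, 4)), 0);
    if (mask == 0)
        return -1;                     /* no match in these 16 bytes */
    return __builtin_ctzll(mask) >> 2; /* 4 mask bits per input byte */
}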
I'm not sure the exact term for what I'm trying to do. I have an 8x8 block of bits stored in 8 bytes, each byte stores one row. When I'm finished, I'd like each byte to store one column.
For example, when I'm finished:
Byte0out = Bit0inByte0 + Bit0inByte1 + Bit0inByte2 + Bit0inByte3 + ...
Byte1out = Bit1inByte0 + Bit1inByte1 + Bit1inByte2 + Bit1inByte3 + ...
What is the easiest way to do this in C which performs well? This will run on a dsPIC microcontroller
This code is cribbed directly from "Hacker's Delight" - Figure 7-2 Transposing an 8x8-bit matrix, I take no credit for it:
void transpose8(unsigned char A[8], int m, int n,
unsigned char B[8]) {
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0]<<24) | (A[m]<<16) | (A[2*m]<<8) | A[3*m];
y = (A[4*m]<<24) | (A[5*m]<<16) | (A[6*m]<<8) | A[7*m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >>14)) & 0x0000CCCC; x = x ^ t ^ (t <<14);
t = (y ^ (y >>14)) & 0x0000CCCC; y = y ^ t ^ (t <<14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[0]=x>>24; B[n]=x>>16; B[2*n]=x>>8; B[3*n]=x;
B[4*n]=y>>24; B[5*n]=y>>16; B[6*n]=y>>8; B[7*n]=y;
}
I didn't check if this rotates in the direction you need, if not you might need to adjust the code.
Also, keep in mind datatypes & sizes - int & unsigned (int) might not be 32 bits on your platform.
BTW, I suspect the book (Hacker's Delight) is essential for the kind of work you're doing... check it out, lots of great stuff in there.
If you are looking for the simplest solution:
/* not tested, not even compiled */
char bytes_in[8];
char bytes_out[8];

/* please fill bytes_in[] here with some pixel-crap */

memset(bytes_out, 0, 8);
for (int i = 0; i < 8; i++) {
    for (int j = 0; j < 8; j++) {
        bytes_out[i] = (bytes_out[i] << 1) | ((bytes_in[j] >> (7 - i)) & 0x01);
    }
}
If you are looking for the fastest solution:
How to transpose a bit matrix in the assembly by utilizing SSE2.
This sounds a lot like a so-called "Chunky to planar" routine used on displays that use bitplanes. The following link uses MC68K assembler for its code, but provides a nice overview of the problem (assuming I understood the question correctly):
http://membres.multimania.fr/amycoders/sources/c2ptut.html
Lisp prototype:
(declaim (optimize (speed 3) (safety 0)))
(defun bit-transpose (a)
  (declare (type (simple-array unsigned-byte 1) a))
  (let ((b (make-array 8 :element-type '(unsigned-byte 8))))
    (dotimes (j 8)
      (dotimes (i 8)
        (setf (ldb (byte 1 i) (aref b j))
              (ldb (byte 1 j) (aref a i)))))
    b))
This is how you can run the code:
#+nil
(bit-transpose (make-array 8 :element-type 'unsigned-byte
:initial-contents '(1 2 3 4 5 6 7 8)))
;; => #(85 102 120 128 0 0 0 0)
Occasionally I disassemble code to check that there are no unnecessary calls to safety functions.
#+nil
(disassemble #'bit-transpose)
This is a benchmark. Run the function often enough to process a (binary) HDTV image.
#+nil
(time
(let ((a (make-array 8 :element-type 'unsigned-byte
:initial-contents '(1 2 3 4 5 6 7 8)))
(b (make-array 8 :element-type 'unsigned-byte
:initial-contents '(1 2 3 4 5 6 7 8))))
(dotimes (i (* (/ 1920 8) (/ 1080 8)))
(bit-transpose a))))
That only took 51 ms. Note that I'm consing quite a lot because the function allocates new 8-byte arrays all the time. I'm sure an implementation in C can be tweaked a lot more.
Evaluation took:
0.051 seconds of real time
0.052004 seconds of total run time (0.052004 user, 0.000000 system)
101.96% CPU
122,179,503 processor cycles
1,048,576 bytes consed
Here are some more test cases:
#+nil
(loop for j below 12 collect
(let ((l (loop for i below 8 collect (random 255))))
(list l (bit-transpose (make-array 8 :element-type 'unsigned-byte
:initial-contents l)))))
;; => (((111 97 195 202 47 124 113 164) #(87 29 177 57 96 243 111 140))
;; ((180 192 70 173 167 41 30 127) #(184 212 221 232 193 185 134 27))
;; ((244 86 149 57 191 65 129 178) #(124 146 23 24 159 153 35 213))
;; ((227 244 139 35 38 65 214 64) #(45 93 82 4 66 27 227 71))
;; ((207 62 236 89 50 64 157 120) #(73 19 71 207 218 150 173 69))
;; ((89 211 149 140 233 72 193 192) #(87 2 12 57 7 16 243 222))
;; ((97 144 19 13 135 198 238 33) #(157 116 120 72 6 193 97 114))
;; ((145 119 3 85 41 202 79 134) #(95 230 202 112 11 18 106 161))
;; ((42 153 67 166 175 190 114 21) #(150 125 184 51 226 121 68 58))
;; ((58 232 38 210 137 254 19 112) #(80 109 36 51 233 167 170 58))
;; ((27 245 1 197 208 221 21 101) #(239 1 234 33 115 130 186 58))
;; ((66 204 110 232 46 67 37 34) #(96 181 86 30 0 220 47 10)))
Now I really want to see how my code compares to Andrejs Cainikovs' C solution
(Edit: I think it's wrong):
#include <string.h>
unsigned char bytes_in[8]={1,2,3,4,5,6,7,8};
unsigned char bytes_out[8];
/* please fill bytes_in[] here with some pixel-crap */
void bit_transpose() {
    memset(bytes_out, 0, 8);
    int i, j;
    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            bytes_out[i] = (bytes_out[i] << 1) | ((bytes_in[j] >> (7 - i)) & 0x01);
}

int main()
{
    int j, i;
    for (j = 0; j < 100; j++)
        for (i = 0; i < (1920/8 * 1080/8); i++)
            bit_transpose();
    return 0;
}
And benchmarking it:
wg@hp:~/0803/so$ gcc -O3 trans.c
wg@hp:~/0803/so$ time ./a.out
real 0m0.249s
user 0m0.232s
sys 0m0.000s
Each loop over the HDTV image takes 2.5ms. That is quite a lot faster than my unoptimized Lisp.
Unfortunately the C code doesn't give the same results as my Lisp:
#include <stdio.h>
int
main()
{
int j,i;
bit_transpose();
for(i=0;i<8;i++)
printf("%d ",(int)bytes_out[i]);
return 0;
}
wg@hp:~/0803/so$ ./a.out
0 0 0 0 1 30 102 170
This is similar to the "get column in a bitboard" problem and can be solved efficiently by considering those input bytes as 8 bytes of a 64-bit integer. If bit 0 is the least significant one and byte 0 is the first byte in the array, then I assume you want to do the following:
Column 7 becomes...
↓
[ b07 b06 b05 b04 b03 b02 b01 b00 [ b70 b60 b50 b40 b30 b20 b10 b00 ← row 0
b17 b16 b15 b14 b13 b12 b11 b10 b71 b61 b51 b41 b31 b21 b11 b01
b27 b26 b25 b24 b23 b22 b21 b20 b72 b62 b52 b42 b32 b22 b12 b02
b37 b36 b35 b34 b33 b32 b31 b30 → b73 b63 b53 b43 b33 b23 b13 b03
b47 b46 b45 b44 b43 b42 b41 b40 → b74 b64 b54 b44 b34 b24 b14 b04
b57 b56 b55 b54 b53 b52 b51 b50 b75 b65 b55 b45 b35 b25 b15 b05
b67 b66 b65 b64 b63 b62 b61 b60 b76 b66 b56 b46 b36 b26 b16 b06
b77 b76 b75 b74 b73 b72 b71 b70 ] b77 b67 b57 b47 b37 b27 b17 b07 ]
where bXY is bit number Y of byte X. In that form, transposing the left-most column is just packing all the most significant bits into a single byte in reverse order, and the other columns can be handled similarly.
To do that we mask off the last 7 columns (keeping only the most significant bit of each byte) and read the array as a uint64_t. The result is
0b h0000000 g0000000 f0000000 e0000000 d0000000 c0000000 b0000000 a0000000
↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑
b77 b67 b57 b47 b37 b27 b17 b07
in little endian, where a..h are b07 to b77 respectively. Now we just need to multiply that value by the magic number 0x0002040810204081 to get a value with hgfedcba in the most significant byte, which is what we want:
uint8_t transpose_column(uint64_t matrix, unsigned col)
{
    const uint64_t column_mask = 0x8080808080808080ull;
    const uint64_t magic       = 0x0002040810204081ull;
    return ((matrix << col) & column_mask) * magic >> 56;
}

uint64_t block8x8;
memcpy(&block8x8, bytes_in, sizeof(block8x8));
#if __BYTE_ORDER == __BIG_ENDIAN
block8x8 = swap_bytes(block8x8);
#endif
for (unsigned i = 0; i < 8; i++)
    byte_out[i] = transpose_column(block8x8, 7 - i);
Because you treat the 8-byte array as a uint64_t, you may want to align the array properly for better performance, since that way only a single memory load is needed.
Alongside AVX2, Intel introduced the PEXT instruction (accessible via the _pext_u64 intrinsic) in the BMI2 instruction set for this purpose, so the column extraction can be done in a single instruction:
data[i] = _pext_u64(matrix, column_mask << (7 - col));
But unfortunately, as you might expect, this won't work on a dsPIC.
More ways to transpose the array can be found in the chess programming wiki
You really want to do something like this with SIMD instructions, for example with the GCC vector support: http://ds9a.nl/gcc-simd/example.html
If you wanted an optimized solution you would use the SSE extensions in x86.
You'd need to use 4 of these SIMD opcodes.
MOVQ - move 8 bytes
PSLLW - packed shift left logical words
PMOVMSKB - packed move mask byte
And 2 regular x86 opcodes
LEA - load effective address
MOV - move
byte[] m = byte[8]; //input
byte[] o = byte[8]; //output
LEA ecx, [o]
// ecx = the address of the output array/matrix
MOVQ xmm0, [m]
// xmm0 = 0|0|0|0|0|0|0|0|m[7]|m[6]|m[5]|m[4]|m[3]|m[2]|m[1]|m[0]
PMOVMSKB eax, xmm0
// eax = m[7][7]...m[0][7] the high bit of each byte
MOV [ecx+7], al
// o[7] is now the last column
PSLLW xmm0, 1
// shift 1 bit to the left
PMOVMSKB eax, xmm0
MOV [ecx+6], al
PSLLW xmm0, 1
PMOVMSKB eax, xmm0
MOV [ecx+5], al
PSLLW xmm0, 1
PMOVMSKB eax, xmm0
MOV [ecx+4], al
PSLLW xmm0, 1
PMOVMSKB eax, xmm0
MOV [ecx+3], al
PSLLW xmm0, 1
PMOVMSKB eax, xmm0
MOV [ecx+2], al
PSLLW xmm0, 1
PMOVMSKB eax, xmm0
MOV [ecx+1], al
PSLLW xmm0, 1
PMOVMSKB eax, xmm0
MOV [ecx], al
25 x86 opcodes/instructions as opposed to the stacked for loop solution with 64 iterations.
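For reference, the same sequence expressed with SSE2 intrinsics (my sketch, untested; the loop does one redundant final shift):

#include <emmintrin.h>
#include <stdint.h>

void transpose8x8_sse2(const uint8_t in[8], uint8_t out[8])
{
    __m128i v = _mm_loadl_epi64((const __m128i *)in);  /* MOVQ: load the 8 input rows */
    for (int col = 7; col >= 0; col--) {
        out[col] = (uint8_t)_mm_movemask_epi8(v);      /* PMOVMSKB: bit 7 of every byte */
        v = _mm_slli_epi16(v, 1);                      /* PSLLW xmm, 1 */
    }
}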