Structure with bit-fields size in C

I tried to use a structure with different sized bit-fields. The total number of bits used is 64. However, when I check the structure size, I get 11 instead of the expected 8. By decomposing the structure, I saw the difference comes from the day field: if I pack the bits into 8-bit groups, day straddles the "end" of month and the "start" of hour. I don't know if this is a good approach. Can someone explain this to me?
#include <stdio.h>
#include <stdint.h>

typedef struct frameHeader_t
{
    uint8_t encryption     : 2;
    uint8_t frameVersion   : 2;
    uint8_t probeType      : 4;
    uint8_t dataType       : 5;
    uint8_t measurePeriod  : 3;
    uint8_t remontePerdiod : 4;
    uint8_t nbrMeasure     : 2;
    uint8_t year           : 7;
    uint8_t month          : 4;
    uint8_t day            : 5;
    uint8_t hour           : 5;
    uint8_t minute         : 6;
    uint8_t second         : 6;
    uint8_t randomization  : 5;
    uint8_t status         : 4;
} FrameHeader;

int main(void)
{
    FrameHeader my_frameHeader;
    printf("%zu\n", sizeof(FrameHeader));  /* %zu: sizeof yields a size_t */
    return 0;
}

If you run it through the pahole tool, you should get an explanation:
struct frameHeader_t {
    uint8_t encryption:2;       /*  0: 6  1 */
    uint8_t frameVersion:2;     /*  0: 4  1 */
    uint8_t probeType:4;        /*  0: 0  1 */
    uint8_t dataType:5;         /*  1: 3  1 */
    uint8_t measurePeriod:3;    /*  1: 0  1 */
    uint8_t remontePerdiod:4;   /*  2: 4  1 */
    uint8_t nbrMeasure:2;       /*  2: 2  1 */
    /* XXX 2 bits hole, try to pack */
    uint8_t year:7;             /*  3: 1  1 */
    /* XXX 1 bit hole, try to pack */
    uint8_t month:4;            /*  4: 4  1 */
    /* XXX 4 bits hole, try to pack */
    uint8_t day:5;              /*  5: 3  1 */
    /* XXX 3 bits hole, try to pack */
    uint8_t hour:5;             /*  6: 3  1 */
    /* XXX 3 bits hole, try to pack */
    uint8_t minute:6;           /*  7: 2  1 */
    /* XXX 2 bits hole, try to pack */
    uint8_t second:6;           /*  8: 2  1 */
    /* XXX 2 bits hole, try to pack */
    uint8_t randomization:5;    /*  9: 3  1 */
    /* XXX 3 bits hole, try to pack */
    uint8_t status:4;           /* 10: 4  1 */

    /* size: 11, cachelines: 1, members: 15 */
    /* bit holes: 8, sum bit holes: 20 bits */
    /* bit_padding: 4 bits */
    /* last cacheline: 11 bytes */
};
You're using uint8_t as the base type, so the fields are getting padded to groups of 8 bits.
You should be able to eliminate the padding completely, and somewhat more portably than with __attribute__((packed)), by using unsigned long long/uint_least64_t (at least 64 bits wide) as the base type of the bit-fields. Technically, base types other than int/unsigned int aren't guaranteed to be supported for bit-fields. You could instead use unsigned (guaranteed by the C standard to be at least 16 bits) after reorganizing the bit-fields a little, for example into:
typedef struct frameHeader_t
{
    //16
    unsigned year           : 7;
    unsigned randomization  : 5;
    unsigned month          : 4;
    //16
    unsigned second         : 6;
    unsigned minute         : 6;
    unsigned status         : 4;
    //16
    unsigned hour           : 5;
    unsigned dataType       : 5;
    unsigned probeType      : 4;
    unsigned encryption     : 2;
    //16
    unsigned day            : 5;
    unsigned remontePerdiod : 4;
    unsigned measurePeriod  : 3;
    unsigned nbrMeasure     : 2;
    unsigned frameVersion   : 2;
} FrameHeader;
// should be an unpadded 8 bytes as long as `unsigned` is 16,
// 32, or 64 bits wide (I don't know of a platform where it isn't)
(The padding or lack thereof isn't guaranteed, but I've never seen an implementation insert it unless it was necessary.)
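If you rely on the struct really being 8 bytes, a compile-time check makes that assumption explicit; a minimal C11 sketch (my addition, not from the original answer):
#include <assert.h>
/* fails the build if the implementation inserts padding after all */
static_assert(sizeof(FrameHeader) == 8, "FrameHeader is expected to pack into 8 bytes");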

To avoid the packing magic, I usually give all the bit-fields the same fixed-size unsigned base type:
typedef struct frameHeader_t
{
    uint64_t encryption     : 2;
    uint64_t frameVersion   : 2;
    uint64_t probeType      : 4;
    uint64_t dataType       : 5;
    uint64_t measurePeriod  : 3;
    uint64_t remontePerdiod : 4;
    uint64_t nbrMeasure     : 2;
    uint64_t year           : 7;
    uint64_t month          : 4;
    uint64_t day            : 5;
    uint64_t hour           : 5;
    uint64_t minute         : 6;
    uint64_t second         : 6;
    uint64_t randomization  : 5;
    uint64_t status         : 4;
} FrameHeader;
https://godbolt.org/z/BX2QsC

convert struct of bitfields to binary format

So, I'm trying to convert 32-bit instructions to machine-code binary.
Take for example the instruction add #3, #5, #9 (#x represents a register):
it has an opcode of 0;
rs holds the first operand value (3), rt the second (5), and rd the third (9);
funct is 1;
the 6 remaining bits are unused, just 0s.
therefore the above R-type instruction will be represented in binary like so:
opCode rs rt rd funct unused
000001 00011 00101 01001 00001 000000 - 32 bits/4B instruction
My attempt to store each instruction:
typedef union __attribute__((__packed__))
{
    struct
    {
        unsigned opcode : 6;
        unsigned rs     : 5;
        unsigned rt     : 5;
        unsigned rd     : 5;
        unsigned funct  : 5;
        unsigned unused : 6;
    } r;
    struct
    {
        unsigned first  : 8;
        unsigned second : 8;
        unsigned third  : 8;
        unsigned fourth : 8;
    } bytes;
} r_instruction;

typedef union __attribute__((__packed__))
{
    struct
    {
        unsigned rs     : 5;
        unsigned rt     : 5;
        unsigned opcode : 6;
        unsigned immed  : 16;
    } i;
    struct
    {
        unsigned first  : 8;
        unsigned second : 8;
        unsigned third  : 8;
        unsigned fourth : 8;
    } bytes;
} i_instruction;

typedef union __attribute__((__packed__))
{
    struct
    {
        unsigned addr   : 25;
        unsigned opcode : 6;
        unsigned isReg  : 1;
    } j;
    struct
    {
        unsigned first  : 8;
        unsigned second : 8;
        unsigned third  : 8;
        unsigned fourth : 8;
    } bytes;
} j_instruction;
I need to output each instruction's binary encoding (like in the example) to a file, where each line is one instruction as 4 bytes, each byte in hexadecimal, in little-endian byte order.
so my input is:
;comment
MAIN: add #3, #5, #9
LOOP: ori #9, -5, #2
la val1
jmp Next
Next: move #20, #4
bgt #0, #2, END
la K
sw #0, 4, #10
bne #31, #9 , LOOP
call val1
jmp #4
END: stop
STR: .asciz "aBcd"
LIST: .db 6, -9
.dh 27056
.dw 5
.entry K
K: .dw 31,-12
.extern val1
The desired output is one line per instruction (the first line being the above example's hexadecimal representation).
but when I try to output each byte like so:
r_instruction inst;
inst.r.opcode = 0;
inst.r.rs = 3;
inst.r.rt = 5;
inst.r.rd = 9;
inst.r.funct = 1;
inst.r.unused = 0;
printf("%x %x %x %x", inst.bytes.first,
                      inst.bytes.second,
                      inst.bytes.third,
                      inst.bytes.fourth);
>> c0 28 29 0
My main problem is how to convert my existing instruction to the requested representation.
Instead of using packed structs with bit-fields, consider using normal structs and generating the raw instruction yourself. For instance, let's take an instruction of type "J":
typedef struct { uint32_t addr; uint32_t opcode; uint32_t isReg; } j_instr;

/* this function generates a 32-bit raw instruction
   (that you can then convert to ASCII hexadecimal or whatever you want) */
uint32_t gen_j_instr(j_instr instr) {
    uint32_t result = 0;
    // let's add the opcode
    result += instr.opcode << 26; // 26 is the starting position of opcode
    // let's add the isReg
    result += instr.isReg << 25;  // 25 is the starting position of isReg
    // let's add the addr
    result += instr.addr;         // no shift needed, addr is at position 0
    return result;
}
The C11 standard gives implementations a lot of freedom in how structs with bit-fields are laid out, so you can make few assumptions about their exact binary representation. With the method above, on the other hand, you're completely independent of how your struct is represented in binary.
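As a sketch of the output step the question asks for (the field values and formatting here are my assumptions, not part of the original answer), you could then emit the raw word as four hex bytes in little-endian order, using gen_j_instr() from above:
#include <stdio.h>
#include <stdint.h>

int main(void) {
    j_instr instr = { .addr = 123, .opcode = 5, .isReg = 0 };  /* hypothetical values */
    uint32_t raw = gen_j_instr(instr);
    /* lowest-addressed byte first = little-endian byte order */
    printf("%02x %02x %02x %02x\n",
           (unsigned)(raw & 0xff), (unsigned)((raw >> 8) & 0xff),
           (unsigned)((raw >> 16) & 0xff), (unsigned)((raw >> 24) & 0xff));
    return 0;
}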
Put them in a union, together with a plain unsigned int:
typedef struct __attribute__((__packed__))
{
    unsigned opcode : 6;
    unsigned rs     : 5;
    unsigned rt     : 5;
    unsigned rd     : 5;
    unsigned funct  : 5;
    unsigned        : 6;
} r_instruction;

typedef struct __attribute__((__packed__))
{
    unsigned rs     : 5;
    unsigned rt     : 5;
    unsigned opcode : 6;
    unsigned immed  : 16;
} i_instruction;

typedef struct __attribute__((__packed__))
{
    unsigned addr   : 25;
    unsigned opcode : 6;
    unsigned isReg  : 1;
} j_instruction;

union all_meuk {
    r_instruction r_meuk;
    i_instruction i_meuk;
    j_instruction j_meuk;
    unsigned      all;
};
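A possible usage sketch (my addition; note that reading .all still depends on the implementation-defined bit-field layout, so verify it against the encoding you need):
union all_meuk m = { .r_meuk = { .opcode = 0, .rs = 3, .rt = 5, .rd = 9, .funct = 1 } };
printf("%08x\n", m.all);  /* raw 32-bit instruction word; layout is compiler-dependent */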

Set the first 10 bits of an int

I have a 32-bit int and I want to set the first 10 bits to a specific number.
For example, the 32-bit int is:
11101010101010110101100100010010
I want the first 10 bits to be the number 123, which is
0001111011
so the result would be
00011110111010110101100100010010
Does anyone know the easiest way to do this? I know it involves bit-shifting, but I'm not good at it, so I'm not sure where to start.
Thank you!
uint32_t result = (input & 0x3fffff) | (newval << 22);
0x3fffff masks out the highest 10 bits (it has the lowest 22 bits set). You then shift your new value left by 22 places so it lands in the highest 10 bits.
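Plugging in the question's own numbers as a quick check (assuming both operands are uint32_t):
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t input  = 0xEAAB5912u; /* 11101010101010110101100100010010 */
    uint32_t newval = 123u;        /* 0001111011 */
    uint32_t result = (input & 0x3fffff) | (newval << 22);
    printf("%08x\n", result);      /* 1eeb5912 = 00011110111010110101100100010010 */
    return 0;
}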
Convert the inputs to unsigned 32-bit integers:
    uint32_t num = strtoul("11101010101010110101100100010010", 0, 2);
    uint32_t firstbits = 123;
Keep only the lower 32-10 = 22 bits. Create the mask by shifting an unsigned long 1 left by 22 places, making 100_0000_0000_0000_0000_0000, then decrementing it to 11_1111_1111_1111_1111_1111:
    uint32_t mask = (1UL << (32-10)) - 1;
    num &= mask;
OR in firstbits shifted left by 32-10:
    num |= firstbits << (32-10);
Or in one line:
    (num & ((1UL << (32-10)) - 1)) | (firstbits*1UL << (32-10))
A detail about firstbits*1UL: the type of firstbits is not defined by the OP and may be only a 16-bit int. To ensure the code can shift and form an answer that exceeds 16 bits (the minimum width of int), multiply by 1UL so the value is unsigned and at least 32 bits wide.
You can "erase" bits (set them to 0) by using a bit wise and ('&'); bits that are 0 in either value will be 0 in the result.
You can set bits to 1 by using a bit wise or ('|'); bits that are 1 in either value will be 1 in the result.
So: and your number with a value where the first 10 bits are 0 and the rest are 1; then 'or' it with the first 10 bits you want put in, and 0 for the other bits. If you need to calculate that value, then a left-shift would be the way to go.
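A minimal sketch of that recipe, assuming the value and the new top bits are both uint32_t:
#include <stdint.h>

uint32_t set_top10(uint32_t x, uint32_t top10)
{
    uint32_t cleared = x & ((1UL << 22) - 1);  /* first (top) 10 bits zeroed, low 22 kept */
    return cleared | (top10 << 22);            /* OR in the new top 10 bits */
}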
You can also take a mask-and-replace approach, where you zero the lower bits required to hold 123 and then simply OR (|) the value with 123 to obtain the final result. You can accomplish exactly the same thing with shifts, as shown in several other answers, or you can do it with masks:
#include <stdio.h>

#ifndef BITS_PER_LONG
#define BITS_PER_LONG 64
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

char *binpad2 (unsigned long n, size_t sz);

int main (void) {

    unsigned x = 0b11101010101010110101100100010010; /* binary constants are a GCC extension */
    unsigned mask = 0xffffff00;  /* mask to zero lower 8 bits */
    unsigned y = 123;            /* value to replace zeroed bits */
    unsigned masked = x & mask;  /* zero the lower bits */

    /* show intermediate results */
    printf ("\n x : %s\n", binpad2 (x, sizeof x * CHAR_BIT));
    printf ("\n & mask : %s\n", binpad2 (mask, sizeof mask * CHAR_BIT));
    printf ("\n masked : %s\n", binpad2 (masked, sizeof masked * CHAR_BIT));
    printf ("\n | 123 : %s\n", binpad2 (y, sizeof y * CHAR_BIT));

    masked |= y;  /* apply the final OR with 123 */
    printf ("\n final : %s\n", binpad2 (masked, sizeof masked * CHAR_BIT));

    return 0;
}

/** returns pointer to binary representation of 'n' zero padded to 'sz'.
 *  returns pointer to string containing binary representation of
 *  unsigned 64-bit (or less) value zero padded to 'sz' digits.
 */
char *binpad2 (unsigned long n, size_t sz)
{
    static char s[BITS_PER_LONG + 1] = {0};
    char *p = s + BITS_PER_LONG;
    register size_t i;

    for (i = 0; i < sz; i++)
        *--p = (n>>i & 1) ? '1' : '0';

    return p;
}
Output
$ ./bin/bitsset
x : 11101010101010110101100100010010
& mask : 11111111111111111111111100000000
masked : 11101010101010110101100100000000
| 123 : 00000000000000000000000001111011
final : 11101010101010110101100101111011
How about using bit-fields in C combined with a union? The following structure lets you set the whole 32-bit value, the top 10 bits, or the bottom 22 bits. It isn't as versatile as a generic function, but you can't easily make a mistake when using it. Be aware that this and most of these solutions may not work for all integer sizes, and look out for endianness as well.
union uu {
    struct {
        uint32_t bottom22 : 22;
        uint32_t top10    : 10;
    } bits;
    uint32_t value;
};
Here is an example usage:
int main(void) {
    union uu myuu;
    myuu.value = 999999999;
    printf("value = 0x%08x\n", myuu.value);
    myuu.bits.top10 = 0;
    printf("value = 0x%08x\n", myuu.value);
    myuu.bits.top10 = 0xfff;  /* truncated to the field's 10 bits, i.e. 0x3ff */
    printf("value = 0x%08x\n", myuu.value);
    return 0;
}
The output is:
value = 0x3b9ac9ff
value = 0x001ac9ff
value = 0xffdac9ff

CRC32 calculation with CRC hash at the beginning of the message in C

I need to calculate the CRC of a message and put it at the beginning of that message, so that the final CRC of the message with the 'prepended' patch bytes equals 0. I was able to do this very easily with the help of a few articles, but not for my specific parameters. The thing is that I have to use a given CRC32 algorithm which calculates the CRC of a memory block, but I don't have the 'reverse' algorithm that calculates those 4 patch bytes / 'kind of CRC'. The parameters of the given CRC32 algorithm are:
Polynomial: 0x04C11DB7
Endianness: big-endian
Initial value: 0xFFFFFFFF
Reflected: false
XOR out with: 0L
Test stream: 0x0123, 0x4567, 0x89AB, 0xCDEF results in CRC = 0x612793C3
The code to calculate the CRC (half-byte, table-driven; I hope the data type definitions are self-explanatory):
uint32 crc32tab(uint16* data, uint32 len, uint32 crc)
{
    uint8 nibble;
    int i;
    while(len--)
    {
        for(i = 3; i >= 0; i--)
        {
            nibble = (*data >> i*4) & 0x0F;
            crc = ((crc << 4) | nibble) ^ tab[crc >> 28];
        }
        data++;
    }
    return crc;
}
The table needed is below (I thought the short [16] table should contain every 16th element of the large [256] table, but this table actually contains the first 16 elements; that's how it was provided to me):
static const uint32 tab[16] =
{
    0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9,
    0x130476DC, 0x17C56B6B, 0x1A864DB2, 0x1E475005,
    0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
    0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD
};
I modified the code so it's not so long, but the functionality stays the same. The problem is that this forward CRC calculation looks more like a backward/reverse CRC calculation.
I've spent almost a week trying to find the correct polynomial/algorithm/table combination, with no luck. If it helps, I came up with a bit-wise algorithm that corresponds to the table-driven code above, although that wasn't so hard after all:
uint32 crc32(uint16* data, uint32 len, uint32 crc)
{
    uint32 i;
    while(len--)
    {
        for(i = 0; i < 16; i++)
        {
            // #define POLY 0x04C11DB7
            crc = (crc << 1) ^ (((crc ^ *data) & 0x80000000) ? POLY : 0);
        }
        crc ^= *data++;
    }
    return crc;
}
Here are the expected results - the first two 16-bit words form the needed unknown CRC, and the rest is the known data itself (feeding these examples to the provided algorithm gives a result of 0):
{0x3288, 0xD244, 0xCDEF, 0x89AB, 0x4567, 0x0123}
{0xC704, 0xDD7B, 0x0000} - append as many zeros as you like, the result is the same
{0xCEBD, 0x1ADD, 0xFFFF}
{0x81AB, 0xB932, 0xFFFF, 0xFFFF}
{0x0857, 0x0465, 0x0000, 0x0123}
{0x1583, 0xD959, 0x0123}
^ ^
| |
unknown bytes that I need to calculate
I think testing this on 0xFFFF or 0x0000 words is convenient because the direction of calculation and the endianness don't matter (I hope :D). So be careful using other test bytes, because the direction of calculation is quite devious :D. Also, you can see that by feeding only zeros to the algorithm (both forward and backward), the result is the so-called residue (0xC704DD7B), which may be helpful.
So... I have written at least 10 different functions (bit-wise, tables, combinations of polynomials, etc.) trying to solve this, with no luck. Here is the function I put my hopes in. It's the 'reversed' algorithm of the table-driven one above, with a different table of course. The problem is that the only correct CRC I get from it is for an all-0s message, and that's not unexpected. I have also written a reversed implementation of the bit-wise algorithm (reversed shifts, etc.), but it returns only the first byte correctly.
Here is the table-driven one. The data pointer should point to the last element of the message, and the crc input should be the requested CRC (0s for the whole message; or you can take another approach - treat the last 4 bytes of the message as the CRC you are looking for: Calculating CRC initial value instead of appending the CRC to payload):
uint32 crc32tabrev(uint16* data, uint32 len, uint32 crc)
{
    uint8 nibble;
    int i;
    while(len--)
    {
        for(i = 0; i < 4; i++)
        {
            nibble = (*data >> i*4) & 0x0F;
            crc = (crc >> 4) ^ revtab[(crc ^ nibble) & 0x0F];
        }
        data--;
    }
    return reverse(crc); // reverse() flips all bits around the center (MSB <-> LSB ...)
}
The table, which I hope is 'the chosen one':
static const uint32 revtab[16] =
{
    0x00000000, 0x1DB71064, 0x3B6E20C8, 0x26D930AC,
    0x76DC4190, 0x6B6B51F4, 0x4DB26158, 0x5005713C,
    0xEDB88320, 0xF00F9344, 0xD6D6A3E8, 0xCB61B38C,
    0x9B64C2B0, 0x86D3D2D4, 0xA00AE278, 0xBDBDF21C
};
As you can see, this algorithm has some quirks which make me run in circles; I think I may be on the right track, but I'm missing something. I hope an extra pair of eyes will see what I cannot. I'm sorry for the long post (no potato :D), but I thought all of that explanation was necessary. Thank you in advance for insight or advice.
I will answer for your CRC specification, that of a CRC-32/MPEG-2. I will have to ignore your attempts at calculating that CRC, since they are incorrect.
Anyway, to answer your question, I happen to have written a program that solves this problem. It is called spoof.c. It very rapidly computes which bits to change in a message to get a desired CRC. It does this in O(log(n)) time, where n is the length of the message. Here is an example:
Let's take the nine-byte message 123456789 (those digits represented in ASCII). We will prepend it with four zero bytes, which we will change to get the desired CRC at the end. The message in hex is then: 00 00 00 00 31 32 33 34 35 36 37 38 39. Now we compute the CRC-32/MPEG-2 for that message. We get 373c5870.
Now we run spoof with this input, which is the CRC length in bits, the fact that it is not reflected, the polynomial, the CRC we just computed, the length of the message in bytes, and all 32 bit locations in the first four bytes (which is what we are allowing spoof to change):
32 0 04C11DB7
373c5870 13
0 0 1 2 3 4 5 6 7
1 0 1 2 3 4 5 6 7
2 0 1 2 3 4 5 6 7
3 0 1 2 3 4 5 6 7
It gives this output, telling which bits in those first four bytes to invert:
invert these bits in the sequence:
offset bit
0 1
0 2
0 4
0 5
0 6
1 0
1 2
1 5
1 7
2 0
2 2
2 5
2 6
2 7
3 0
3 1
3 2
3 4
3 5
3 7
We then set the first four bytes to: 76 a5 e5 b7. We then test by computing the CRC-32/MPEG-2 of the message 76 a5 e5 b7 31 32 33 34 35 36 37 38 39 and we get 00000000, the desired result.
You can adapt spoof.c to your application.
Here is an example that correctly computes the CRC-32/MPEG-2 on a stream of bytes using a bit-wise algorithm:
uint32_t crc32m(uint32_t crc, const unsigned char *buf, size_t len)
{
    int k;

    while (len--) {
        crc ^= (uint32_t)(*buf++) << 24;
        for (k = 0; k < 8; k++)
            crc = crc & 0x80000000 ? (crc << 1) ^ 0x04c11db7 : crc << 1;
    }
    return crc;
}
and with a nybble-wise algorithm using the table in the question (which is correct):
uint32_t crc_table[] = {
    0x00000000, 0x04C11DB7, 0x09823B6E, 0x0D4326D9,
    0x130476DC, 0x17C56B6B, 0x1A864DB2, 0x1E475005,
    0x2608EDB8, 0x22C9F00F, 0x2F8AD6D6, 0x2B4BCB61,
    0x350C9B64, 0x31CD86D3, 0x3C8EA00A, 0x384FBDBD
};

uint32_t crc32m_nyb(uint32_t crc, const unsigned char *buf, size_t len)
{
    while (len--) {
        crc ^= (uint32_t)(*buf++) << 24;
        crc = (crc << 4) ^ crc_table[crc >> 28];
        crc = (crc << 4) ^ crc_table[crc >> 28];
    }
    return crc;
}
In both cases, the initial CRC must be 0xffffffff.
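As a quick sanity check (my sketch, not part of the original answer), feeding the question's test stream to crc32m() most-significant byte first should reproduce the question's stated CRC:
#include <stdio.h>
#include <stdint.h>

int main(void) {
    /* the question's test stream 0x0123, 0x4567, 0x89AB, 0xCDEF as MSB-first bytes */
    unsigned char test[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };
    printf("%08x\n", crc32m(0xffffffff, test, sizeof test)); /* question says 612793c3 */
    return 0;
}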
An alternate approach. This assumes xorout = 0; if not, then after calculating the normal CRC, apply crc ^= xorout to remove it. The method multiplies the normal CRC by ((1/2) mod POLY) raised to the (message size in bits) power, modulo the CRC polynomial, which is equivalent to cycling the CRC backwards. If the message size is fixed, the mapping is fixed and the time complexity is O(1); otherwise it's O(log(n)).
This example code uses Visual Studio and an intrinsic for carry-less multiply (PCLMULQDQ), which uses XMM (128-bit) registers. Visual Studio uses the __m128i type to represent integer XMM values.
#include <stdio.h>
#include <stdlib.h>
#include <intrin.h>

typedef unsigned char      uint8_t;
typedef unsigned int       uint32_t;
typedef unsigned long long uint64_t;

#define POLY  (0x104c11db7ull)
#define POLYM ( 0x04c11db7u)

static uint32_t crctbl[256];

static __m128i poly;    /* poly */
static __m128i invpoly; /* 2^64 / POLY */

void GenMPoly(void) /* generate __m128i poly info */
{
    uint64_t N = 0x100000000ull;
    uint64_t Q = 0;
    for(size_t i = 0; i < 33; i++){
        Q <<= 1;
        if(N & 0x100000000ull){
            Q |= 1;
            N ^= POLY;
        }
        N <<= 1;
    }
    poly.m128i_u64[0] = POLY;
    invpoly.m128i_u64[0] = Q;
}

void GenTbl(void) /* generate crc table */
{
    uint32_t crc;
    uint32_t c;
    uint32_t i;
    for(c = 0; c < 0x100; c++){
        crc = c << 24;
        for(i = 0; i < 8; i++)
            /* assumes twos complement */
            crc = (crc << 1) ^ ((0 - (crc >> 31)) & POLYM);
        crctbl[c] = crc;
    }
}

uint32_t GenCrc(uint8_t * bfr, size_t size) /* generate crc */
{
    uint32_t crc = 0xffffffffu;
    while(size--)
        crc = (crc << 8) ^ crctbl[(crc >> 24) ^ *bfr++];
    return(crc);
}

/* carryless multiply modulo poly */
uint32_t MpyModPoly(uint32_t a, uint32_t b) /* (a*b)%poly */
{
    __m128i ma, mb, mp, mt;
    ma.m128i_u64[0] = a;
    mb.m128i_u64[0] = b;
    mp = _mm_clmulepi64_si128(ma, mb, 0x00);      /* p[0] = a*b */
    mt = _mm_clmulepi64_si128(mp, invpoly, 0x00); /* t[1] = (p[0]*((2^64)/POLY))>>64 */
    mt = _mm_clmulepi64_si128(mt, poly, 0x01);    /* t[0] = t[1]*POLY */
    return mp.m128i_u32[0] ^ mt.m128i_u32[0];     /* ret = p[0] ^ t[0] */
}

/* exponentiate by repeated squaring modulo poly */
uint32_t PowModPoly(uint32_t a, uint32_t b) /* pow(a,b)%poly */
{
    uint32_t prd = 0x1u; /* current product */
    uint32_t sqr = a;    /* current square */
    while(b){
        if(b & 1)
            prd = MpyModPoly(prd, sqr);
        sqr = MpyModPoly(sqr, sqr);
        b >>= 1;
    }
    return prd;
}

int main()
{
    uint32_t inv; /* 1/2 % poly, constant */
    uint32_t fix; /* fix value, constant if msg size fixed */
    uint32_t crc; /* crc at end of msg */
    uint32_t pre; /* prefix for msg */
    uint8_t msg[13] = {0x00,0x00,0x00,0x00,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39};
    GenMPoly();                           /* generate __m128i polys */
    GenTbl();                             /* generate crc table */
    inv = PowModPoly(2, 0xfffffffeu);     /* inv = 2^(2^32-2) % POLY = 1/2 % POLY */
    fix = PowModPoly(inv, 8*sizeof(msg)); /* fix value */
    crc = GenCrc(msg, sizeof(msg));       /* calculate normal crc */
    pre = MpyModPoly(fix, crc);           /* convert to prefix */
    printf("crc = %08x pre = %08x ", crc, pre);
    msg[0] = (uint8_t)(pre >> 24);        /* store prefix in msg */
    msg[1] = (uint8_t)(pre >> 16);
    msg[2] = (uint8_t)(pre >>  8);
    msg[3] = (uint8_t)(pre >>  0);
    crc = GenCrc(msg, sizeof(msg));       /* check result */
    if(crc == 0)
        printf("passed\n");
    else
        printf("failed\n");
    return 0;
}
Well, a few hours after my question, someone whose name I don't remember posted an answer to it which turned out to be correct. Somehow that answer got completely deleted; I don't know why or who did it, but I'd like to thank that person, and if you see this, please post your answer again and I'll delete this one. For other users, here is the answer that worked for me (unfortunately, I can't replicate his notes and suggestions well enough, just the code itself). Thank you again, mysterious one.
Edit: the original answer came from user samgak, so this stays here until he posts his answer.
The reverse CRC algorithm:
uint32 revcrc32(uint16* data, uint32 len, uint32 crc)
{
    uint32 i;
    data += len - 1;
    while(len--)
    {
        crc ^= *data--;
        for(i = 0; i < 16; i++)
        {
            uint32 crc1 = ((crc ^ POLY) >> 1) | 0x80000000;
            uint32 crc2 = crc >> 1;
            if(((crc1 << 1) ^ (((crc1 ^ *data) & 0x80000000) ? POLY : 0)) == crc)
                crc = crc1;
            else if(((crc2 << 1) ^ (((crc2 ^ *data) & 0x80000000) ? POLY : 0)) == crc)
                crc = crc2;
        }
    }
    return crc;
}
Find patch bytes:
#define CRC_OF_ZERO 0xb7647d
void bruteforcecrc32(uint32 targetcrc)
{
    // compute prefixes:
    uint32 jj;
    for(jj = 0; jj <= 0xffff; jj++)  /* 32-bit counter so the loop can terminate */
    {
        uint16 j = (uint16)jj;
        uint32 crc = revcrc32(&j, 1, targetcrc);
        if((crc >> 16) == (CRC_OF_ZERO >> 16))
        {
            printf("prefixes: %04lX %04lX\n", (crc ^ CRC_OF_ZERO) & 0xffff, (uint32)j);
            return;
        }
    }
}
Usage:
uint16 test[] = {0x0123, 0x4567, 0x89AB, 0xCDEF}; // prefix should be 0x0CD8236A
bruteforcecrc32(revcrc32(test, 4, 0L));

How to assign a const value to bit-fields inside a typedef?

I don't have any ideas.
typedef union {
    struct {
        uint8_t  start_bit        : 1; // always 0
        uint8_t  transmission_bit : 1; // always 1
        uint8_t  cmd              : 6;
        uint32_t arg;
        uint8_t  crc              : 7;
        uint8_t  end_bit          : 1; // always 1
    } bit;
    uint8_t reg[6];
} Command_t;
I need some of the bits to always equal 1 or 0.
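C has no way to give individual bit-field members constant values inside the typedef itself; the usual workaround is to fix them at every construction site. A minimal sketch using C99 designated initializers (the macro name and values are mine, not from the thread):
#define COMMAND_INIT(c, a, crc7) { .bit = { \
    .start_bit = 0, .transmission_bit = 1,  \
    .cmd = (c), .arg = (a), .crc = (crc7),  \
    .end_bit = 1 } }

Command_t cmd = COMMAND_INIT(17, 0x12345678u, 0x55); /* hypothetical values */
An init function that sets the fixed bits would work just as well. Note also that mixing uint8_t and uint32_t field types makes the struct layout, and hence the reg[6] overlay, implementation-dependent.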

Alignment of the mixed bit fields and fields of structures in big-endian and little-endian

From my previous experience, I understood the following:
// if I have a structure on a big-endian system that looks like this:
typedef struct
{
    unsigned long
        a: t1,
        b: t2,
        c: t3,
        d: t4,
        //...
        z: tn;
} TType;
// I can adapt it for little-endian like so:
typedef struct
{
    unsigned long
        z: tn,
        //...
        d: t4,
        c: t3,
        b: t2,
        a: t1;
} TType;
// and I get an identical mapping to memory
or the following:
// if I have the next structure:
typedef struct
{
    unsigned long
        a : 2,
        b : 5,
        c : 6,
        d : 3;
} TType2;
// ...
TType2 test;
test.a = 0x2;
test.b = 0x0E;
test.c = 0x3A;
test.d = 0x6;
printf("*(unsigned short *)&test = 0x%04X\n", *(unsigned short *)&test);
// on a little-endian system I get: 0xDD3A, or mapped to memory:
// c'-|-----b-----|-a-| |--d--|------c----
// 0 0 1 1 _ 1 0 1 0 _|_ 1 1 0 1 _ 1 1 0 1 _
// on a big-endian system I get: 0xD69D, or mapped to memory:
// -----c-----|--d--| |-a-|------b----|-c'
// 1 1 0 1 _ 0 1 1 0 _|_ 1 0 0 1 _ 1 1 0 1 _
If I am not right, please correct me.
My embedded system is 32-bit little-endian. The device has big-endian hardware, connected via SPI.
The program that I must adapt for my system previously worked with this hardware on a 32-bit big-endian system via a parallel bus.
I began adapting the library, which is intended for building and analyzing Ethernet frames and some other things.
I met the following code (which blew my mind):
#pragma pack(1)
//...
typedef unsigned short word;
//...
#ifdef _MOTOROLA_CPU
typedef struct
{
    word ip_ver       : 4;
    word ihl          : 4;
    word ip_tos       : 8;
    word tot_len;
    word identification;
    word flags        : 3;
    word fragment_ofs : 13;
    word time_to_live : 8;
    word protocol     : 8;
    word check_sum;
    IP_ADDRESS_T src;
    IP_ADDRESS_T dest;
} IP_MSG_HEADER_T, *IP_MSG_HEADER_P;
#else // Intel CPU.
typedef struct
{
    word ip_tos       : 8;
    word ihl          : 4;
    word ip_ver       : 4;
    word tot_len;
    word identification;
    word fragment_ofs : 13;
    word flags        : 3;
    word protocol     : 8;
    word time_to_live : 8;
    word check_sum;
    IP_ADDRESS_T src;
    IP_ADDRESS_T dest;
} IP_MSG_HEADER_T, *IP_MSG_HEADER_P;
#endif
But I also met the following:
#ifdef _MOTOROLA_CPU
typedef struct
{
    word formid         : 5;
    word padding_formid : 3;
    word TS_in_bundle   : 5;
    word padding_ts     : 3;
    word cell_per_frame : 8;
    word padding        : 8;
} SERVICE_SPEC_OLD_FIELD_T, *SERVICE_SPEC_OLD_FIELD_P;
#else // Intel CPU.
typedef struct
{
    word padding        : 8;
    word cell_per_frame : 8;
    word padding_ts     : 3;
    word TS_in_bundle   : 5;
    word padding_formid : 3;
    word formid         : 5;
} SERVICE_SPEC_OLD_FIELD_T, *SERVICE_SPEC_OLD_FIELD_P;
#endif /*_MOTOROLA_CPU*/
Similar vagueness is everywhere in this library.
I don't see the logic here. Am I really stupid, or is this code nonsense?
Also, am I right about the following:
// Two structures
typedef struct
{
    unsigned long
        a : 1,
        b : 3,
        c : 12;
    unsigned short word;
} Type1;
// and
typedef struct
{
    unsigned long
        a : 1,
        b : 3,
        c : 12,
        word : 16;
} Type2;
// will they be identical in memory for little-endian and different for big-endian?
Thanks in advance!
In this particular case, the structures are aligned for big- and little-endian on 16-bit boundaries. If you group the fields into 16-bit pieces, you can see that that is the size of the units being swapped for endian compatibility.
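If the goal is a layout that doesn't depend on the compiler's bit-field allocation at all, a hedged alternative (my sketch, not from this library) is to build each 16-bit unit with shifts and serialize it explicitly:
#include <stdint.h>

/* pack ip_ver:4, ihl:4, ip_tos:8 into one 16-bit unit, independent of bit-field rules */
uint16_t pack_ver_ihl_tos(unsigned ip_ver, unsigned ihl, unsigned ip_tos)
{
    return (uint16_t)(((ip_ver & 0xF) << 12) | ((ihl & 0xF) << 8) | (ip_tos & 0xFF));
}
You then write the 16-bit result out in whatever byte order the wire format requires, and the #ifdef _MOTOROLA_CPU duplication disappears.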
