Print full uint32_t in hex from struct bitfield - c

I have a structure like below:
struct myCoolStuff {
    uint32_t stuff1 : 4;
    uint32_t stuff2 : 4;
    uint32_t stuff3 : 24;
    uint32_t differentField;
};
How can I combine these fields into a hex format for printing to the screen or writing out to a file? Thank you.
struct myCoolStuff data = {.stuff1=0xFF, .stuff2=0x66, .stuff3=0x112233, .differentField=99};
printf("my combined stuff is: %x\n", <combined stuff>);
printf("My full field is: %x\n", data.differentField);
Expected Output:
my combined stuff is: 0xFF66112233
My different field is: 99

First, you can't get 0xFF back out of a 4-bit variable after storing 0xFF in it; 0xFF takes 8 bits. The same goes for 0x66.
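A minimal illustration of the truncation (using the struct from the question):

data.stuff1 = 0xFF;           /* only the low 4 bits are kept */
printf("%X\n", data.stuff1);  /* prints F, not FF */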
As for reinterpreting the bitfields as a single integer, you could, in a very nonportable fashion (there are big-endian/little-endian issues and the possibility of padding bits), use a union.
(This:
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
struct myCoolStuff {
    union {
        struct {
            uint32_t stuff1 : 4;
            uint32_t stuff2 : 4;
            uint32_t stuff3 : 24;
        };
        uint32_t fullField;
    };
};
struct myCoolStuff data = {.stuff1=0xFF, .stuff2=0x66, .stuff3=0x112233};
int main()
{
    printf("My full field is: %" PRIX32 "\n", data.fullField);
}
prints 1122336F on my x86_64: stuff1 and stuff2 are truncated to 0xF and 0x6, and on a little-endian machine they land in the low byte.)
To do it portably you can simply take the bitfields and put them together manually:
This:
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
struct myCoolStuff {
    uint32_t stuff1 : 4;
    uint32_t stuff2 : 4;
    uint32_t stuff3 : 24;
};
struct myCoolStuff data = {.stuff1=0xFF, .stuff2=0x66, .stuff3=0x112233};
int main()
{
    uint32_t fullfield = data.stuff1 << 28 | data.stuff2 << 24 | data.stuff3;
    printf("My full field is: %" PRIX32 "\n", fullfield);
}
should print F6112233 anywhere it compiles (uint32_t isn't guaranteed to exist, although on POSIX platforms it will; uint_least32_t would have been more portable).
Be careful to make sure data.stuff1 has enough bits to be shiftable by 28. Yours does because it's typed uint32_t, but it would be safer to do it, e.g., with (data.stuff1 + 0UL) << 28 or (data.stuff1 + UINT32_C(0)) << 28, and the same for the second shift.
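Why this matters: on most implementations a bitfield narrower than int promotes to int, so the shift happens in signed arithmetic and can shift into the sign bit. A minimal sketch of the safer version, assuming the struct above:

/* Adding 0UL forces the promoted value to unsigned long before shifting,
   so no bit can ever be shifted into a signed type's sign bit. */
uint32_t fullfield = (data.stuff1 + 0UL) << 28
                   | (data.stuff2 + 0UL) << 24
                   | data.stuff3;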

Add a union inside of this struct that you can use to reinterpret the fields.
struct myCoolStuff {
    union {
        struct {
            uint32_t stuff1 : 4;
            uint32_t stuff2 : 4;
            uint32_t stuff3 : 24;
        };
        uint32_t stuff;
    };
    uint32_t fullField;
};
...
printf("my combined stuff is: %x\n", data.stuff);

Shift the bit-fields into place (using at least uint32_t math) and then print using the matching format specifier.
#include <inttypes.h>
struct myCoolStuff {
    uint32_t stuff1 : 4;
    uint32_t stuff2 : 4;
    uint32_t stuff3 : 24;
    uint32_t differentField;
};

uint32_t combined_stuff = ((uint32_t) data.stuff1 << (4 + 24)) |
        ((uint32_t) data.stuff2 << 24) | data.stuff3;
printf("my combined stuff is: 0x%" PRIX32 "\n", combined_stuff);
printf("My different field is: %u\n", data.differentField);

Maybe something like this will help:

unsigned char *ptr = (unsigned char *)&data;   // store start address
int size = sizeof(struct myCoolStuff);         // get size of struct in bytes
while (size--)                                 // for each byte
{
    unsigned char c = *ptr++;                  // get byte value
    printf(" %x ", (unsigned)c);               // print byte value
}

Note that this dumps the bytes in memory order, so the output depends on the machine's endianness and any padding.

Related

conversion from 'long long unsigned int' to 'long long unsigned int:40' changes value from '0xFFFFFFFFFFFFFFFF' to '0xFFFFFFFFFF' [-Werror=overflow]

I have this example code that throws an error when I try to fix one of the GCC warnings:
#include <stdint.h>
//
typedef union someStruct
{
    uint64_t All;
    struct
    {
        uint64_t Foo : 40;
        uint64_t Bar : 24;
    } Field;
} someStruct;
#define bits_64 ((uint64_t)(-1))
//
typedef union bits
{
    uint64_t oneBit: 1;
    uint64_t twoBits: 2;
    uint64_t threeBits: 3;
    uint64_t fourBits: 4;
    uint64_t fiveBits: 5;
    uint64_t sixBits: 6;
    uint64_t sevenBits: 7;
    uint64_t fourtyBits: 40;
    uint64_t All;
} bits;
#define bits_40 (((bits)(-1)).fourtyBits)
//
int main()
{
    someStruct x;
    someStruct y;
    x.Field.Foo = bits_64; // -Woverflow warning
    // trying to fix the warning using the bits union
    y.Field.Foo = bits_40; // but this throws the error msg below
    /*
    <source>:30:19: error: cast to union type from type not present in union
       30 | #define bits_40 (((bits)(-1)).fourtyBits)
          |                  ^
    */
    return 0;
}
How can I use a union to define any number of bits and assign it to any struct field?
P.S. I cannot use enums and/or define a union variable; I have to use macros this way to fit the codebase.
Your #define for bits_40 should look like this (a compound literal rather than a cast, since casting to a union type from a type not present in the union is invalid):
#define bits_40 (((bits){.All = -1}).fourtyBits)
You could also just do:
#define bits_40 ((1ULL << 40) - 1)
and skip the bits union entirely. Or you could define a BIT_MASK macro as follows:
#define BIT_MASK(bits) ((1ULL << (bits)) - 1)
...
x.Field.Foo = BIT_MASK(40);
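A quick sanity check of the macro (a sketch; note that BIT_MASK(64) would be undefined behavior, since shifting a 64-bit value by its full width is not allowed):

#include <assert.h>
#define BIT_MASK(bits) ((1ULL << (bits)) - 1)

int main(void)
{
    assert(BIT_MASK(40) == 0xFFFFFFFFFFULL); /* exactly 40 set bits */
    assert(BIT_MASK(8)  == 0xFFULL);
    return 0;
}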

How to always allocate static array with size multiple of 32

Is there an easy way to allocate such an array so that its size is always a multiple of 32, without modifying the actual define? The reasoning is that if the size of my_data increases, I want the buffers to always be a multiple of 32, large enough to hold my_data, but without manually recalculating the size. I.e., if my_data is 52 bytes, I want the buffer to be 64, and so on.
typedef struct
{
    char one[5];
    uint16_t two[2];
    uint32_t three[1];
} my_data_t;
#define SIZE_OF_DATA sizeof(my_data_t)
uint8_t array[SIZE_OF_DATA];
I think this should do. The question is why are you doing it in the first place, though.
#define SIZE_OF_DATA sizeof(my_data_t)
#define SIZE_OF_DATA_PADDED_TO_32 (((SIZE_OF_DATA - 1) / 32 + 1) * 32)
uint8_t array[SIZE_OF_DATA_PADDED_TO_32];
If your allocation size is N and your block size is X (both positive),
then your adjusted allocation for N becomes:
N = ((N + X - 1) / X) * X;
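For example, with N = 52 and X = 32: ((52 + 31) / 32) * 32 = (83 / 32) * 32 = 2 * 32 = 64, using integer division.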
Hence, this is what you likely want:
#define SIZE_OF_DATA sizeof(my_data_t)
uint8_t array[((SIZE_OF_DATA + 31) / 32) * 32];
It can be done with three simple calculations, like:
#include <stdio.h>

size_t getArrSize(size_t sz)
{
    sz = sz - 1;   // subtract one to make e.g. 32 become 31
    sz = sz & ~31; // remove bits representing values less than 32
    sz = sz + 32;  // add 32 to compensate for the removed bits and the subtraction by 1
    return sz;     // sz is now N * 32 and greater than or equal to the input sz
}

int main(void) {
    printf("%zu\n", getArrSize(31));
    printf("%zu\n", getArrSize(32));
    printf("%zu\n", getArrSize(33));
    return 0;
}
Output:
32
32
64
To get it all evaluated at compile time, you can do as shown below:
#include <stdio.h>

#define DESIRED_STRUCT_SIZE 33 // Change this line to change the size of the struct
struct someData
{
    char d[DESIRED_STRUCT_SIZE];
};
#define ALIGN_TO_SIZE 32
#define ACTUAL_SIZE sizeof(struct someData)
#define ALIGNED_SIZE (((ACTUAL_SIZE - 1) & ~(ALIGN_TO_SIZE - 1)) + ALIGN_TO_SIZE)

int main(void)
{
    struct someData arr[ALIGNED_SIZE];
    printf("Elements in arr %zu\n", sizeof arr / sizeof arr[0]);
    return 0;
}
Output:
Elements in arr 64
Try this; it works in pure C:
#define SIZE_OF_DATA (sizeof(my_data_t) % 32 ? (sizeof(my_data_t) & ~31) + 32 : sizeof(my_data_t))
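A compile-time check of that macro (a sketch, assuming the my_data_t from the question; _Static_assert requires C11):

#include <stdint.h>

typedef struct
{
    char one[5];
    uint16_t two[2];
    uint32_t three[1];
} my_data_t;

#define SIZE_OF_DATA (sizeof(my_data_t) % 32 ? (sizeof(my_data_t) & ~31) + 32 : sizeof(my_data_t))

uint8_t array[SIZE_OF_DATA];
_Static_assert(SIZE_OF_DATA % 32 == 0, "buffer size must be a multiple of 32");
_Static_assert(SIZE_OF_DATA >= sizeof(my_data_t), "buffer must hold my_data_t");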
Also, taking advantage of C++ (in case you happen to be using a C++ compiler for your C program), you would do better to use a constexpr function. This one works for variables:
template<class T> constexpr size_t size_32(const T& my_data)
{
    return sizeof(my_data) % 32 ? (sizeof(my_data) & ~31) + 32 : sizeof(my_data);
}
Now easy to test:
int main()
{
    my_data_t my_data;
    char my_data32[size_32(my_data)];
    cout << sizeof(my_data32) << endl;
...
Or, for just any type:
typedef struct
{
    char one[5];
    uint16_t two[2];
    uint32_t three[1];
} my_data_t;

template <class T> constexpr size_t size_32()
{
    return sizeof(T) % 32 ? (sizeof(T) & ~31) + 32 : sizeof(T);
}

int main()
{
    cout << size_32<my_data_t>() << endl;
The following variant is strictly bound to your struct type:
typedef struct
{
    char one[49];
    uint16_t two[2];
    uint32_t three[1];
} my_data_t;

constexpr size_t size_32()
{
    return sizeof(my_data_t) % 32 ? (sizeof(my_data_t) & ~31) + 32 : sizeof(my_data_t);
}

int main()
{
    char my_32_data[size_32()];
    cout << sizeof(my_32_data) << endl;

Bit field memory usage in C

Why does it return 96 and not 64?
If I sum the bits of the bit fields, I get 64.
Edited:
The var variable has 0x3FFFFFFF00FFFFFF and not 0xFFFFFFFFFFFFFFFF.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
typedef struct {
    uint32_t a : 24;
    uint32_t b : 20;
    uint32_t c : 10;
    uint32_t d : 6;
    uint32_t e : 4;
} MyType_t;

int main() {
    MyType_t test;
    test.a = -1;
    test.b = -1;
    test.c = -1;
    test.d = -1;
    test.e = -1;
    uint64_t var = *((uint64_t*)&test);
    printf("MyType_t: %zu bit\n", sizeof(MyType_t) * 8); // 96 bit
    printf("Var: %#llX\n", var); // 0x3FFFFFFF00FFFFFF
    return 0;
}
This code will work correctly:
typedef struct {
    uint32_t a : 16;
    uint32_t b : 16;
    uint32_t c : 16;
    uint32_t d : 8;
    uint32_t e : 8;
} MyType_t;
The fields a and b cannot both fit into a single uint32_t:
typedef struct {
    uint32_t a : 24; // first 32-bit unit
    uint32_t b : 20; // second 32-bit unit
    uint32_t c : 10; //
    uint32_t d : 6;  // third 32-bit unit
    uint32_t e : 4;  //
} MyType_t;
so the size of the struct is three times the size of uint32_t.
The behavior of the code uint64_t var = *((uint64_t*)&test); is not defined.
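A well-defined way to read the first eight bytes of the struct's object representation is memcpy (a sketch, assuming the test variable above and #include <string.h>; the value you get still depends on the implementation's bitfield layout):

uint64_t var;
memcpy(&var, &test, sizeof var); /* copy the first 8 of the struct's 12 bytes */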

Filling up the bytes in an int variable

There are two variables,
uint8_t x (8-bit type)
uint16_t y (16-bit type)
that together hold information about the value of an int num. Say num consists of four bytes abcd (where a is the most significant). Then x needs to be copied to b, and y needs to be copied to cd. What is the best way/code to do this?
This works for me:
#include <stdio.h>
#include <stdint.h>
int main()
{
    uint8_t x = 0xF2;
    uint16_t y = 0x1234;
    int a = 0x87654321;
    // The core operations that put x and y in a.
    a = (a & 0xFF000000) | (x << 16);
    a = (a & 0xFFFF0000) | y;
    printf("x: %X\n", x);
    printf("y: %X\n", y);
    printf("a: %X\n", a);
}
Here's the output:
x: F2
y: 1234
a: 87F21234
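Wrapped up as a function (a sketch; fill_bytes is a hypothetical helper name, and the masks assume a 32-bit int), doing the intermediate arithmetic in unsigned math:

#include <stdint.h>

static int fill_bytes(int num, uint8_t x, uint16_t y)
{
    uint32_t u = (uint32_t)num;
    u = (u & 0xFF000000u) | ((uint32_t)x << 16); /* x becomes byte b */
    u = (u & 0xFFFF0000u) | y;                   /* y becomes bytes cd */
    return (int)u; /* value-preserving on two's-complement systems */
}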
You can use a union (although be careful with padding/alignment, and note that which byte of abcd each struct member maps to depends on endianness):
typedef union
{
    uint32_t abcd;
    struct
    {
        uint8_t a;
        uint8_t b;
        uint16_t cd;
    } parts;
} myvaluetype;

myvaluetype myvalue = {0};
uint8_t x = 42;
uint16_t y = 2311;
myvalue.parts.b = x;
myvalue.parts.cd = y;
printf("%u\n", myvalue.abcd);
Byte masks will do it; note these lines extract x and y back out of the value rather than inserting them:
uint8_t x = (a >> 16) & 0xFF; // byte b
uint16_t y = a & 0xFFFF;      // bytes cd

get16bits macro in hash function

I was looking at hash functions the other day and came across a website that had an example of one. Most of the code was easy to grasp, however this macro function I can't really wrap my head around.
Could someone breakdown what's going on here?
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) +(uint32_t)(((const uint8_t *)(d))[0]))
Basically it reads the first two bytes at d as a little-endian 16-bit value; for a 32-bit integer on a little-endian machine, that is the lower 16 bits.
Let's break it down:
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) +(uint32_t)(((const uint8_t *)(d))[0]))
uint32_t a = 0x12345678;
uint16_t b = get16bits(&a); // b == 0x00005678
First we must pass the address of a to get16bits() or it will not work.
(((uint32_t)(((const uint8_t *)(d))[1])) << 8)
This first treats the 32-bit integer as an array of 8-bit integers and retrieves the second one. It then shifts that value left by 8 bits and adds the first (lower) byte to it:
+ (uint32_t)(((const uint8_t *)(d))[0])
In our example it will be:
uint8_t *tmp = (uint8_t *)&a;
uint32_t result;
result = tmp[1] << 8; // tmp[1] == 0x56, so result == 0x00005600
result += tmp[0];     // tmp[0] == 0x78
// result is now 0x00005678
The macro is more or less equivalent to:
static uint32_t get16bits(SOMETYPE *d)
{
    unsigned char temp[sizeof *d];
    uint32_t val;

    memcpy(temp, d, sizeof *d);
    val = (temp[1] << 8)
        + temp[0];
    return val;
}
, but the macro argument has no type, and the function argument does.
Another way would be to actually cast:
static uint32_t get16bits(SOMETYPE *d)
{
    unsigned char *cp = (unsigned char *)d;
    uint32_t val;

    val = (cp[1] << 8)
        + cp[0];
    return val;
}
, which also shows the weakness: by indexing with 1, the code assumes that sizeof(*d) is at least 2.
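If you only need the two bytes in host byte order (rather than the macro's explicit little-endian order), a memcpy into a uint16_t also works; a sketch, with the caveat that it matches the macro's result only on little-endian machines:

#include <stdint.h>
#include <string.h>

static uint16_t get16bits_host(const void *d)
{
    uint16_t v;
    memcpy(&v, d, sizeof v); /* reads two bytes in host byte order */
    return v;
}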
