How to put a hex value into a 24-bit struct - C

So I have two structs:
struct cmd {
    uint8_t a;
    uint8_t b;
    uint8_t c;
};
typedef struct someName {
    uint8_t size;
    struct cmd cmdID;
} someName_t;
And I have res[0], a string containing "0xabc".
This 0xabc needs to be put inside the cmd struct.
But the problem is that 0xabc is 12 bits (1010 1011 1100), so if I put it into a struct with only uint8_t a and uint8_t b it works, because it "fits" into 16 bits. But I have uint8_t a, uint8_t b and uint8_t c, so 24 bits, and that is my problem.
I tried:
someName_t msg;
sscanf(res[0], "0x%x", &(msg.cmdID));
But this does not work. It does work, however, if I remove the uint8_t c variable from the struct, because it then fits inside the remaining 16 bits.
So how can I get the value "0xabc" into this (24-bit) struct without adjusting the struct?

If you have control of your input format, i.e. you can guarantee it will always be something like 0xabc, then you can try:
const char input[] = "0xabc";
uint32_t tmp;
sscanf(input, "0x%x", &tmp);       /* parse the hex text into a temporary */
struct cmd cmdid;
cmdid.a = (tmp & 0xFF0000U) >> 16; /* most significant byte:  0x00 */
cmdid.b = (tmp & 0xFF00U) >> 8;    /* middle byte:            0x0a */
cmdid.c = (tmp & 0xFFU);           /* least significant byte: 0xbc */
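To make the round trip concrete, here is a small self-contained sketch along those lines (only a sketch; it uses the SCNx32/PRIx32 macros from <inttypes.h> so the sscanf/printf formats match uint32_t exactly):
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct cmd {
    uint8_t a;
    uint8_t b;
    uint8_t c;
};

int main(void)
{
    const char input[] = "0xabc";
    uint32_t tmp = 0;
    if (sscanf(input, "0x%" SCNx32, &tmp) != 1) {   /* tmp == 0x00000abc on success */
        return 1;
    }

    struct cmd cmdid;
    cmdid.a = (tmp & 0xFF0000U) >> 16;   /* 0x00 */
    cmdid.b = (tmp & 0xFF00U) >> 8;      /* 0x0a */
    cmdid.c = (tmp & 0xFFU);             /* 0xbc */

    /* reassemble the bytes to confirm the round trip */
    uint32_t check = ((uint32_t)cmdid.a << 16) | ((uint32_t)cmdid.b << 8) | cmdid.c;
    printf("0x%" PRIx32 "\n", check);    /* prints 0xabc */
    return 0;
}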

You could try changing struct cmd to:
struct cmd {
unsigned int c:4;
unsigned int b:4;
unsigned int a:4;
};
But YMMV depending on compiler. Works OK on MSVC++ 2010.
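If you go the bit-field route, a minimal sketch of filling it from the parsed value might look like this (assuming the parsed number is already in an unsigned int; note the in-memory bit order remains compiler-specific):
#include <stdio.h>

struct cmd {
    unsigned int c:4;
    unsigned int b:4;
    unsigned int a:4;
};

int main(void)
{
    unsigned int tmp = 0xabc;        /* value parsed earlier with sscanf */
    struct cmd cmdid;
    cmdid.a = (tmp >> 8) & 0xFu;     /* 0xa */
    cmdid.b = (tmp >> 4) & 0xFu;     /* 0xb */
    cmdid.c = tmp & 0xFu;            /* 0xc */
    printf("%x %x %x\n", (unsigned)cmdid.a, (unsigned)cmdid.b, (unsigned)cmdid.c);
    return 0;
}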

Related

Turning a bitfield into a uint8_t

My bitfield below represents the 7 status flags for the 6502 CPU. I am trying to emulate the instruction php, which pushes a copy of the status flags onto the stack.
struct Flags {
uint8_t C: 1;
uint8_t Z: 1;
uint8_t I: 1;
uint8_t D: 1;
uint8_t V: 1;
uint8_t N: 1;
uint8_t B: 2;
};
I need a way to pack each field into a single-byte datatype like uint8_t so that I can push it to the stack. However, the code below gives this error: operand of type 'struct Flags' where arithmetic or pointer type is required. How do I resolve this problem?
int main() {
struct Flags f = {0, 0, 0, 0, 1, 0, 0};
uint8_t f_as_byte = (uint8_t) f;
}
The problem with bitfields is that it is implementation-defined in what order the bits are laid out. This could be rather unacceptable for a 6502 emulator. The PHP command must push the status word in the exact desired format, i.e. something like
uint8_t as_state = f.N << 7 | f.V << 6 | f.B << 4 | f.D << 3 | f.I << 2 | f.Z << 1 | f.C;
Your layout is wrong; the B member is in the wrong position. All in all, considering complex code like the one above, maybe it would be easier to just treat the flags as a single uint8_t and have access macros for it, something like
#define FLAG_N 0x80U
...
#define FLAG_C 0x1U
#define SET_FLAG(flag_var, flag) ((flag_var) |= (flag))
#define CLR_FLAG(flag_var, flag) ((flag_var) &= ~(flag))
#define GET_FLAG(flag_var, flag) ((_Bool)((flag_var) & (flag)))
uint8_t flags = 0;
SET_FLAG(flags, FLAG_C);
if (GET_FLAG(flags, FLAG_N)) { ... }
That way the PHP instruction can be coded to just push flags as is...
Except that the B flag that is pushed is not a flag from the status register...
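For what it's worth, a small sketch of how that detail is usually handled (assuming the flags variable and FLAG_* macros above; on the 6502, PHP and BRK push the status byte with bit 5 and the B bit, bit 4, forced to 1, since B is not actually stored in the register):
/* build the byte that PHP pushes; flags itself never stores B */
uint8_t status_for_php(uint8_t flags)
{
    return (uint8_t)(flags | 0x30U);   /* set bit 5 (unused) and bit 4 (B) */
}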
You can do it with a union:
typedef unsigned char uint8_t;
struct Flags {
uint8_t C:1;
uint8_t Z:1;
uint8_t I:1;
uint8_t D:1;
uint8_t V:1;
uint8_t N:1;
uint8_t B:2;
};
union uFlags {
uint8_t B;
struct Flags F;
};
int
main()
{
struct Flags f = { 0, 0, 0, 0, 1, 0, 0 };
union uFlags u;
u.F = f;
uint8_t f_as_byte = u.B;
}
With a designated initializer, you may be able to initialize the union directly without a separate f:
typedef unsigned char uint8_t;
struct Flags {
uint8_t C:1;
uint8_t Z:1;
uint8_t I:1;
uint8_t D:1;
uint8_t V:1;
uint8_t N:1;
uint8_t B:2;
};
union uFlags {
uint8_t B;
struct Flags F;
};
int
main()
{
union uFlags u = {
.F = { .V = 1 }
};
uint8_t f_as_byte = u.B;
}
Instead of casting the structure as a (uint8_t), you should take its address and cast that as a (uint8_t *):
#include <stdio.h>
#include <stdint.h>
struct Flags {
uint8_t C: 1;
uint8_t Z: 1;
uint8_t I: 1;
uint8_t D: 1;
uint8_t V: 1;
uint8_t N: 1;
uint8_t B: 2;
};
int main() {
struct Flags f = {0, 0, 0, 0, 1, 0, 0};
uint8_t f_as_byte = *(uint8_t *)&f;
printf("flags: %.2X\n", f_as_byte);
return 0;
}
Note however that the actual bit values used to represent the bit-field members are implementation defined. The C Standard does not even guarantee that the Flags structure fits in a single byte. This makes bit-fields a risky choice for representing hardware registers or instruction opcodes, as it would make the emulator non-portable. Little-endian and big-endian systems typically use a different layout for bit-field members.
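If you do keep the bit-field struct, a portable way back from a pushed status byte is to reverse the shift-based packing shown earlier (a sketch only, assuming the Flags layout from the question and the 6502 bit order N V B B D I Z C, high to low):
#include <stdint.h>

struct Flags {
    uint8_t C: 1;
    uint8_t Z: 1;
    uint8_t I: 1;
    uint8_t D: 1;
    uint8_t V: 1;
    uint8_t N: 1;
    uint8_t B: 2;
};

/* rebuild the struct from a status byte bit by bit, so the result
   does not depend on how the compiler lays the bit-field out */
struct Flags unpack_flags(uint8_t p)
{
    struct Flags f;
    f.C = (p >> 0) & 1u;
    f.Z = (p >> 1) & 1u;
    f.I = (p >> 2) & 1u;
    f.D = (p >> 3) & 1u;
    f.B = (p >> 4) & 3u;   /* bits 4-5 */
    f.V = (p >> 6) & 1u;
    f.N = (p >> 7) & 1u;
    return f;
}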

Creating bmp file in C

I am trying to create a .bmp file (filled with one colour for testing purposes).
Here is code that I'm using:
#include <stdio.h>
#define BI_RGB 0
typedef unsigned int UINT;
typedef unsigned long DWORD;
typedef long int LONG;
typedef unsigned short WORD;
typedef unsigned char BYTE;
typedef struct tagBITMAPFILEHEADER {
UINT bfType;
DWORD bfSize;
UINT bfReserved1;
UINT bfReserved2;
DWORD bfOffBits;
} BITMAPFILEHEADER;
typedef struct tagBITMAPINFOHEADER {
DWORD biSize;
LONG biWidth;
LONG biHeight;
WORD biPlanes;
WORD biBitCount;
DWORD biCompression;
DWORD biSizeImage;
LONG biXPelsPerMeter;
LONG biYPelsPerMeter;
DWORD biClrUsed;
DWORD biClrImportant;
} BITMAPINFOHEADER;
typedef struct COLORREF_RGB
{
BYTE cRed;
BYTE cGreen;
BYTE cBlue;
}COLORREF_RGB;
int main(int argc, char const *argv[])
{
BITMAPINFOHEADER bih;
bih.biSize = sizeof(BITMAPINFOHEADER);
bih.biWidth = 600;
bih.biHeight = 600;
bih.biSizeImage = bih.biWidth * bih.biHeight * 3;
bih.biPlanes = 1;
bih.biBitCount = 24;
bih.biCompression = BI_RGB;
bih.biXPelsPerMeter = 2835;
bih.biYPelsPerMeter = 2835;
bih.biClrUsed = 0;
bih.biClrImportant = 0;
COLORREF_RGB rgb;
rgb.cRed = 0;
rgb.cGreen = 0;
rgb.cBlue = 0;
BITMAPFILEHEADER bfh;
bfh.bfType = 0x424D;
bfh.bfReserved1 = 0;
bfh.bfReserved2 = 0;
bfh.bfOffBits = sizeof(BITMAPFILEHEADER) + bih.biSize;
bfh.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
bih.biWidth * bih.biHeight * 4;
FILE *f;
f = fopen("test.bmp","wb");
fwrite(&bfh, sizeof(BITMAPFILEHEADER), 1, f);
fwrite(&bih, sizeof(BITMAPINFOHEADER), 1, f);
int i,j;
for(i = 0; i < bih.biHeight; i++)
{
for(j = 0; j < bih.biWidth; j++)
{
fwrite(&rgb,sizeof(COLORREF_RGB),1,f);
}
}
fclose(f);
return 0;
}
and yet every time I compile and run it I get an error saying that it's not a valid BMP image. I double-checked all values in multiple references and still can't find the error here.
Did I misunderstand something, and what am I doing wrong here?
Also, not sure if it's important, but I am using Ubuntu 14.04 to compile this.
EDIT
Found one more issue:
bfh.bfType = 0x424D;
should be
bfh.bfType = 0x4D42;
But still can't see image.
First of all, you set:
bfSize Specifies the size of the file, in bytes.
to an invalid value; your code resulted in 1440112 while the size of the file is actually 1080112:
bfh.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
bih.biWidth * bih.biHeight * sizeof(COLORREF_RGB);
Because sizeof(COLORREF_RGB) is actually 4 not 3.
Another mistake is the size of your structs and types:
                              //  expected size   actual size*
typedef unsigned int UINT;    //        2              4
typedef unsigned long DWORD;  //        4              8
typedef long int LONG;        //        4              8
typedef unsigned short WORD;  //        2              2
typedef unsigned char BYTE;   //        1              1
* I'm using gcc on x86_64 architecture
Your offsets just don't match the offsets on Wikipedia; the reference you are using was probably written for a 16-bit compiler on a 16-bit OS (as pointed out by cup in a comment), so it assumes int to be a 2-byte type.
Using values from stdint.h worked for me (guide on stdint.h for Visual Studio here):
#include <stdint.h>
typedef uint16_t UINT;
typedef uint32_t DWORD;
typedef int32_t LONG;
typedef uint16_t WORD;
typedef uint8_t BYTE;
And last but not least, you have to turn off memory alignment (structure padding), as suggested by Weather Vane.
I believe your field sizes to be incorrect; try this:
#pragma pack(push, 1)
typedef struct tagBITMAPFILEHEADER {
WORD bfType;
DWORD bfSize;
WORD bfReserved1;
WORD bfReserved2;
DWORD bfOffBits;
} BITMAPFILEHEADER;
#pragma pack(pop)
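A minimal sketch of how the two fixes combine (assuming the stdint.h typedefs above and a C11 compiler for _Static_assert): with 1-byte packing the headers come out at the 14 and 40 bytes the BMP format expects.
#include <stdint.h>

typedef uint32_t DWORD;
typedef int32_t  LONG;
typedef uint16_t WORD;

#pragma pack(push, 1)
typedef struct tagBITMAPFILEHEADER {
    WORD  bfType;
    DWORD bfSize;
    WORD  bfReserved1;
    WORD  bfReserved2;
    DWORD bfOffBits;
} BITMAPFILEHEADER;

typedef struct tagBITMAPINFOHEADER {
    DWORD biSize;
    LONG  biWidth;
    LONG  biHeight;
    WORD  biPlanes;
    WORD  biBitCount;
    DWORD biCompression;
    DWORD biSizeImage;
    LONG  biXPelsPerMeter;
    LONG  biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} BITMAPINFOHEADER;
#pragma pack(pop)

/* if either of these fails, the on-disk layout would be wrong */
_Static_assert(sizeof(BITMAPFILEHEADER) == 14, "file header must be 14 bytes");
_Static_assert(sizeof(BITMAPINFOHEADER) == 40, "info header must be 40 bytes");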
You are trying to map a C structure to some externally-defined binary format. There are a number of problems with this:
Size
typedef unsigned int UINT;
typedef unsigned long DWORD;
typedef long int LONG;
typedef unsigned short WORD;
typedef unsigned char BYTE;
The C language only specifies the minimum sizes of these types. Their actual sizes can and do vary between different compilers and operating systems. The sizes and offsets of the members in your structures may not be what you expect. The only size that you can rely on (on almost any system that you are likely to encounter these days) is char being 8 bits.
Padding
typedef struct tagBITMAPFILEHEADER {
UINT bfType;
DWORD bfSize;
UINT bfReserved1;
UINT bfReserved2;
DWORD bfOffBits;
} BITMAPFILEHEADER;
It is common for DWORD to be twice as large as a UINT, and in fact your program depends on it. This usually means that the compiler will introduce padding between bfType and bfSize to give the latter appropriate alignment for its type. The .bmp format has no such padding.
Order
BITMAPFILEHEADER bfh;
bfh.bfType = 0x424D;
...
fwrite(&bfh, sizeof(BITMAPFILEHEADER), 1, f);
Another problem is that the C language does not specify the endianness of the types. The .bfType member may be stored as [42][4D] (Big-Endian) or [4D][42] (Little-Endian) in memory. The .bmp format specifically requires the values to be stored in Little-Endian order.
Solution
You might be able to solve some of these problems by using compiler-specific extensions (such as #pragma's or compiler switches), but probably not all of them.
The only way to properly write an externally-defined binary format is to use an array of unsigned char and write the values a byte at a time. Personally, I would write a set of helper functions for specific types:
void w32BE (unsigned char *p, unsigned long ul)
{
p[0] = (ul >> 24) & 0xff;
p[1] = (ul >> 16) & 0xff;
p[2] = (ul >> 8) & 0xff;
p[3] = (ul ) & 0xff;
}
void w32LE (unsigned char *p, unsigned long ul)
{
p[0] = (ul ) & 0xff;
p[1] = (ul >> 8) & 0xff;
p[2] = (ul >> 16) & 0xff;
p[3] = (ul >> 24) & 0xff;
}
/* And so on */
and some functions for writing the entire .bmp file or sections of it:
int write_header (FILE *f, BITMAPFILEHEADER bfh)
{
unsigned char buf[14];
w16LE (buf , bfh.bfType);
w32LE (buf+ 2, bfh.bfSize);
w16LE (buf+ 6, bfh.bfReserved1);
w16LE (buf+ 8, bfh.bfReserved2);
w32LE (buf+10, bfh.bfOffBits);
return fwrite (buf, sizeof buf, 1, f);
}

cast of a structure into another one to get specific data

I have this structure representing a generic message
typedef struct {
uint16_t length;
uint8_t type1;
uint8_t type2;
uint8_t *data;
} generic_msg_t;
After reading type1 and type2 I know which specific message it corresponds to, for example this one:
typedef struct {
uint16_t length;
uint8_t type1;
uint8_t type2;
uint16_t a;
uint8_t b;
uint8_t c;
double d;
double e;
double f;
double g;
double h;
uint8_t i;
} specific_msg_t;
Supposing msg contains verified data, I would like to understand why, if I do this, I cannot access the d, e, f, g, h data (while a, b, c work fine):
specific_msg_t *specific_msg = (specific_msg_t *) msg;
uint16_t a = specific_msg->a; // OK
double d = specific_msg->d;   // NOK
I have to do instead:
unsigned char *buffer = (unsigned char *) msg;
double d = buffer[15] + (buffer[14] << 8) + (buffer[13] << 16) + (buffer[12] << 24) + (buffer[11] << 32) + (buffer[10] << 40) + (buffer[9] << 48) + (buffer[8] << 56);
specific_msg_t has potentially different alignment requirements than generic_msg_t, so on some architectures that code could result in a crash - the compiler has no particular requirement to align a generic_msg_t object on a boundary suitable for accessing a double.
It would help to know exactly what error you're getting though.
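One alignment-safe alternative is to memcpy the bytes into a properly aligned local (only a sketch; OFFSET_D is a hypothetical constant that would have to match the byte offset of d in the actual wire format, which is not necessarily offsetof(specific_msg_t, d)):
#include <string.h>

#define OFFSET_D 8   /* hypothetical: byte offset of d in the received buffer */

double read_d(const unsigned char *buffer)
{
    double d;
    /* memcpy has no alignment requirement, unlike dereferencing a cast pointer;
       note this does NOT fix byte-order differences between sender and receiver */
    memcpy(&d, buffer + OFFSET_D, sizeof d);
    return d;
}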
As long as the sender and receiver use the same headers (uint16_t is the same for both of them) and both of them use the same architecture (e.g. x86 or whatever) there shouldn't be too many problems.
I would also define a union with all the specific message types that you need, in order to avoid those nasty calculations and most of the casts.
Something similar to this:
typedef struct {
uint16_t length;
uint8_t type1;
uint8_t type2;
// whatever you need here...
double a;
float b;
// etc.
} specific_msg_t1;
typedef struct {
uint16_t length;
uint8_t type1;
uint8_t type2;
uint16_t a;
uint8_t b;
uint8_t c;
double d;
double e;
double f;
double g;
double h;
uint8_t i;
} specific_msg_t2;
typedef struct
{
uint16_t length;
uint8_t type1;
uint8_t type2;
} selector_msg_t;
typedef union
{
selector_msg_t selector;
specific_msg_t1 msgtype1;
specific_msg_t2 msgtype2;
} message_type_t;
With this setup you could be sending messages of type "message_type_t". Use the member "selector" to decide which specific type of message you received and then use the appropriate specific type to read the different data members.
If the size difference is considerable between the specific types, then it is a bad idea, but if you're not limited by bandwidth then it's at least readable and not that error prone.
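A rough usage sketch (assuming the union above and that type1 alone selects the message kind; the constants MSG_KIND_1/MSG_KIND_2 are made up for illustration):
#include <stdio.h>

#define MSG_KIND_1 0x01  /* hypothetical type1 value identifying specific_msg_t1 */
#define MSG_KIND_2 0x02  /* hypothetical type1 value identifying specific_msg_t2 */

void handle_message(const message_type_t *msg)
{
    switch (msg->selector.type1) {
    case MSG_KIND_1:
        printf("msg1.a = %f\n", msg->msgtype1.a);   /* read through the matching member */
        break;
    case MSG_KIND_2:
        printf("msg2.d = %f\n", msg->msgtype2.d);
        break;
    default:
        /* unknown message type: ignore or report */
        break;
    }
}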

How to break struct members into bytes of a single word

Say I have a C structure like:
typedef struct {
    UINT8 nRow;
    UINT8 nCol;
    UINT16 nData;
} tempStruct;
Is there a way to put all of those 3 members of the struct into a single 32-bit word, yet still be able to access them individually?
Something with the help of unions?
typedef struct {
    UINT8 nRow;
    UINT8 nCol;
    UINT16 nData;
} tempStruct;
typedef union {
tempStruct myStruct;
UINT32 myWord;
} stuff;
Or even better (with no "intermediate" struct):
#include <stdlib.h>
#include <stdio.h>
typedef union {
struct {
int nRow:8;
int nCol:8;
int nData:16;
};
int myWord;
} stuff;
int main(int argc, char** argv){
stuff a;
a.myWord=0;
a.nCol=2;
printf("%d\n", a.myWord);
return 0;
}
What about just referring to it as a UINT32? It's not like C is type-safe.
tempStruct t;
t.nRow = 0x01;
t.nCol = 0x02;
t.nData = 0x04;
//put a reference to the struct as a pointer to a UINT32
UINT32* word = (UINT32 *) &t;
printf("%x", *word);
You can then get the value of the struct as a 32-bit word by dereferencing the pointer. The specifics of your system may matter, though... if I run this on my machine, the value of *word is 0x00040201, that is, the fields are in reverse order. I don't think that's necessarily going to be the case if you're trying to serialize this to another system, so it's not portable.
If you want to actually store it as a 32-bit integer and then refer to the fields individually, why not
UINT32 word = 0x01020004;
and then somewhere else...
/* take the word by address; returning a pointer into a by-value parameter
   would leave a dangling pointer once the function returns */
UINT8* row(UINT32 *word) {
    return (UINT8 *) word + 3;
}
UINT8* col(UINT32 *word) {
    return ((UINT8 *) word) + 2;
}
UINT16* data(UINT32 *word) {
    return (UINT16 *) word;
}
Macros will facilitate portable endianness.
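For instance, such macros might look like this (just a sketch, assuming the UINT8/UINT16/UINT32 typedefs from the question; putting nRow in the top byte is one possible convention, chosen here so the packed value reads 0x01020004 for the example above):
/* extract the fields from the packed word by value, independent of host endianness */
#define ROW(w)   ((UINT8)(((w) >> 24) & 0xFF))
#define COL(w)   ((UINT8)(((w) >> 16) & 0xFF))
#define DATA(w)  ((UINT16)((w) & 0xFFFF))

/* pack them back the same way */
#define PACK(r, c, d)  (((UINT32)(r) << 24) | ((UINT32)(c) << 16) | (UINT16)(d))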
Yes, you can use bit fields in C to do that. Something like:
typedef struct {
unsigned nRow : 8;
unsigned nCol : 8;
unsigned nData : 16;
} tempStruct;
If you want to control the memory layout as well, you might want to take a look at #pragma pack; it is a non-portable option available on some compilers.
typedef struct {
    int nRow:8;
    int nCol:8;
    int nData:16;
} tempStruct;
nRow will take only 8 bits, nCol will take 8 bits, and nData will take 16 bits.
This will work for you.
I just wrote a sample program to see the size of it:
#include <stdio.h>
typedef struct {
    int nRow:8;
    int nCol:8;
    int nData:16;
} tempStruct;
typedef struct {
    int nRow;
    int nCol;
    int nData;
} tempStructZ;
int main(void) {
    printf("%zu\n", sizeof(tempStruct));
    printf("%zu\n", sizeof(tempStructZ));
    return 0;
}
Output:
4
16

Casting struct into int

Is there a clean way of casting a struct into a uint64_t or any other int, given that the struct is <= the size of that int?
The only thing I can think of is only an 'ok' solution: to use unions. However, I have never been fond of them.
Let me add a code snippet to clarify:
typedef struct {
uint8_t field: 5;
uint8_t field2: 4;
/* and so on... */
}some_struct_t;
some_struct_t some_struct;
//init struct here
uint32_t reg;  /* "register" is a C keyword, so the variable needs another name */
Now how do I cast some_struct to capture its bit order in the uint32_t reg?
Hope that makes it a bit clearer.
I've just hit the same problem, and I solved it with a union like this:
typedef union {
struct {
uint8_t field: 5;
uint8_t field2: 4;
/* and so on... */
} fields;
uint32_t bits;
} some_struct_t;
/* cast from uint32_t x */
some_struct_t mystruct = { .bits = x };
/* cast to uint32_t */
uint32_t x = mystruct.bits;
A non-portable solution:
#include <string.h>   /* for memcpy */
#include <stdint.h>
struct smallst {
    int a;
    char b;
};
void make_uint64_t(struct smallst *ps, uint64_t *pi) {
    memcpy(pi, ps, sizeof(struct smallst));
}
You may face problems if you, for example, pack the struct on a little-endian machine and unpack it on a big-endian machine.
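A quick usage sketch (assuming the struct and helper above; zero-initialising the destination matters because the struct may not fill all 8 bytes):
#include <stdio.h>
#include <inttypes.h>

int main(void) {
    struct smallst s = { 42, 'x' };
    uint64_t packed = 0;            /* zero first: memcpy only fills sizeof(struct smallst) bytes */
    make_uint64_t(&s, &packed);
    printf("0x%" PRIx64 "\n", packed);
    return 0;
}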
You can use pointers and it will be easy.
For example:
struct s {
    int a:8;
    int b:4;
    int c:4;
    int d:8;
    int e:8;
} st;                               /* an actual object, not an uninitialized pointer */
st.b = 0x8;
st.c = 1;
int *struct_as_int = (int *) &st;   /* reinterpret the 32 bits of the struct */
Hope it helps.
You can cast the object's pointer to the desired type and then dereference it. I assume it can be a little bit slower than using unions or something else, but this does not require additional actions and can be used in place.
Short answer:
*(uint16_t *)&my_struct
Example:
#include <stdio.h>
#include <stdint.h>
typedef struct {
uint8_t field1;
uint8_t field2;
} MyStruct;
int main() {
MyStruct my_struct = {0xFA, 0x7D};
uint16_t num_my_struct = *(uint16_t *)&my_struct;
printf("%X \n", num_my_struct); // 7DFA
return 0;
}
