Converting a bit field to a byte array - C

I have some MPEG TS bit fields, for example a transport stream packet header:
struct ts_package_header_s {
    unsigned int continuity_counter :4;
    unsigned int adaptation_field_control :2;
    unsigned int transport_scrambling_control :2;
    unsigned int PID :13;
    unsigned int transport_priority :1;
    unsigned int payload_unit_start_indicator :1;
    unsigned int transport_error_indicator :1;
    unsigned int sync_byte :8;
};

struct ts_package_s {
    struct ts_package_header_s ts_header;
    unsigned char ts_body[TS_BODY];
};

union ts_package_u {
    struct ts_package_s ts_package;
    unsigned char bytes[TS_PACKAGE];
};
In my source code I initialize the header struct:
pat_package_header.sync_byte = 0x47;
pat_package_header.transport_error_indicator = 0;
pat_package_header.payload_unit_start_indicator = 1;
pat_package_header.transport_priority = 0;
pat_package_header.PID = PAT_PID;
pat_package_header.transport_scrambling_control = 0;
pat_package_header.adaptation_field_control = 1;
pat_package_header.continuity_counter = 0;
And then I create the ts_package union:
union ts_package_u package;
package.ts_package.ts_header = pat_package_header;
Then I fill the ts_body array.
When I write this packet to a file, I get a byte-reversed array:
10 00 40 47 XX XX XX.. instead of 47 40 10 00 XX XX XX..
I tried casting my struct to char* instead of using the union, but got the same result.
Where is the fault? Thanks.

It's dangerous to try to serialise structs like this directly to disk when the format must be readable across different architectures or with different compilers.
Compilers differ in how they store bitfields, and the underlying endianness of your architecture also changes how the data is stored.
For example, a compiler may choose to align bitfields on a byte, word or some other boundary - it's a compiler decision. It may also store the bits in any order, which usually depends on the endianness of your machine.
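You can see the layout your compiler actually chose by dumping the raw bytes of the struct - a minimal sketch, assuming the ts_package_header_s definition from the question:

#include <stdio.h>
#include <string.h>

/* Print the raw in-memory bytes of the header; the output differs
   between compilers and architectures, which is exactly the problem. */
static void dump_layout( const struct ts_package_header_s *h )
{
    unsigned char raw[sizeof *h];
    size_t i;

    memcpy( raw, h, sizeof raw );
    for( i = 0; i < sizeof raw; i++ )
        printf( "%02X ", raw[i] );
    printf( "\n" );
}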
In order to safely write this header to disk, you need to serialise the data yourself. The header is 32 bits and big-endian according to Wikipedia.
So for example:
#include <stdio.h>

#define TS_BODY 1024
#define PAT_PID 0x40

struct ts_package_header_s {
    unsigned int continuity_counter :4;
    unsigned int adaptation_field_control :2;
    unsigned int transport_scrambling_control :2;
    unsigned int PID :13;
    unsigned int transport_priority :1;
    unsigned int payload_unit_start_indicator :1;
    unsigned int transport_error_indicator :1;
    unsigned int sync_byte :8;
};

struct ts_package_s {
    struct ts_package_header_s ts_header;
    unsigned char ts_body[TS_BODY];
};

static void write_ts( struct ts_package_s pat_package )
{
    FILE* f = fopen( "test.ts", "wb+" );
    unsigned int header = 0;

    if( f == NULL )
        return;

    header  = pat_package.ts_header.sync_byte << 24;
    header |= ( pat_package.ts_header.transport_error_indicator << 23 );
    header |= ( pat_package.ts_header.payload_unit_start_indicator << 22 );
    header |= ( pat_package.ts_header.transport_priority << 21 );
    header |= ( pat_package.ts_header.PID << 8 );
    header |= ( pat_package.ts_header.transport_scrambling_control << 6 );
    header |= ( pat_package.ts_header.adaptation_field_control << 4 );
    header |= ( pat_package.ts_header.continuity_counter );

    /* Write the 32-bit header as big-endian */
    unsigned char byte = header >> 24;
    fwrite( &byte, 1, 1, f );
    byte = ( header >> 16 ) & 0xFF;
    fwrite( &byte, 1, 1, f );
    byte = ( header >> 8 ) & 0xFF;
    fwrite( &byte, 1, 1, f );
    byte = header & 0xFF;
    fwrite( &byte, 1, 1, f );

    fclose( f );
}

int main( int argc, char* argv[] )
{
    struct ts_package_s pat_package;

    pat_package.ts_header.sync_byte = 0x47;
    pat_package.ts_header.transport_error_indicator = 0;
    pat_package.ts_header.payload_unit_start_indicator = 1;
    pat_package.ts_header.transport_priority = 0;
    pat_package.ts_header.PID = PAT_PID;
    pat_package.ts_header.transport_scrambling_control = 0;
    pat_package.ts_header.adaptation_field_control = 1;
    pat_package.ts_header.continuity_counter = 0;

    write_ts( pat_package );
    return 0;
}
Writes a file with the following header:
0x47 0x40 0x40 0x10
which is correct for the values you're using (with PAT_PID defined as 0x40).
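If you also need to read such packets back, the same approach applies in reverse. A sketch of a matching parser (the masks mirror the shifts in write_ts above; untested against a real stream):

/* Parse the 4 big-endian header bytes back into the struct fields */
static void read_ts_header( const unsigned char buf[4],
                            struct ts_package_header_s *h )
{
    unsigned int v = ( (unsigned int)buf[0] << 24 ) |
                     ( (unsigned int)buf[1] << 16 ) |
                     ( (unsigned int)buf[2] <<  8 ) |
                       (unsigned int)buf[3];

    h->sync_byte                    = ( v >> 24 ) & 0xFF;
    h->transport_error_indicator    = ( v >> 23 ) & 0x1;
    h->payload_unit_start_indicator = ( v >> 22 ) & 0x1;
    h->transport_priority           = ( v >> 21 ) & 0x1;
    h->PID                          = ( v >>  8 ) & 0x1FFF;
    h->transport_scrambling_control = ( v >>  6 ) & 0x3;
    h->adaptation_field_control     = ( v >>  4 ) & 0x3;
    h->continuity_counter           =   v         & 0xF;
}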

Related

Turning a bitfield into a uint8_t

My bitfield below represents the 7 status flags of the 6502 CPU. I am trying to emulate the PHP instruction, which pushes a copy of the status flags onto the stack.
struct Flags {
    uint8_t C: 1;
    uint8_t Z: 1;
    uint8_t I: 1;
    uint8_t D: 1;
    uint8_t V: 1;
    uint8_t N: 1;
    uint8_t B: 2;
};
I need a way to pack each field into a single-byte datatype like uint8_t so that I can push it to the stack. However, the code below gives this error: operand of type 'struct Flags' where arithmetic or pointer type is required. How do I resolve this problem?
int main() {
    struct Flags f = {0, 0, 0, 0, 1, 0, 0};
    uint8_t f_as_byte = (uint8_t) f;
}
The problem with bitfields is that the order in which the bits are laid out is implementation-defined. That could be rather unacceptable for a 6502 emulator: the PHP instruction must push the status word in the exact expected format, i.e. something like
uint8_t as_state = f.N << 7 | f.V << 6 | f.B << 4 | f.D << 3 | f.I << 2 | f.Z << 1 | f.C;
Your layout is wrong: the B member is in the wrong position. All in all, considering complex code like the line above, maybe it would be easier to just treat the flags as a single uint8_t and have access macros for it, something like
#define FLAG_N 0x80U
...
#define FLAG_C 0x1U
#define SET_FLAG(flag_var, flag) ((flag_var) |= (flag))
#define CLR_FLAG(flag_var, flag) ((flag_var) &= ~(flag))
#define GET_FLAG(flag_var, flag) ((_Bool)((flag_var) & (flag)))
uint8_t flags = 0;
SET_FLAG(flags, FLAG_C);
if (GET_FLAG(flags, FLAG_N)) { ... }
That way the PHP instruction can be coded to just push flags as is...
Except that the B flag that is pushed is not a flag from the status register...
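A sketch of what that could look like - push_stack() is a hypothetical helper, and forcing bits 4 and 5 on in the pushed copy follows the usual 6502 convention for PHP:

#define FLAG_B_PUSHED 0x30U  /* bits 4 and 5 are set in the pushed copy only */

/* PHP: push a copy of the status byte; the live flags are left untouched */
void op_php(uint8_t flags)
{
    push_stack((uint8_t)(flags | FLAG_B_PUSHED));  /* hypothetical stack helper */
}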
You can do it with a union:
typedef unsigned char uint8_t;

struct Flags {
    uint8_t C:1;
    uint8_t Z:1;
    uint8_t I:1;
    uint8_t D:1;
    uint8_t V:1;
    uint8_t N:1;
    uint8_t B:2;
};

union uFlags {
    uint8_t B;
    struct Flags F;
};

int
main()
{
    struct Flags f = { 0, 0, 0, 0, 1, 0, 0 };
    union uFlags u;

    u.F = f;
    uint8_t f_as_byte = u.B;
}
With a designated initializer, you may be able to initialize the union directly without a separate f:
typedef unsigned char uint8_t;

struct Flags {
    uint8_t C:1;
    uint8_t Z:1;
    uint8_t I:1;
    uint8_t D:1;
    uint8_t V:1;
    uint8_t N:1;
    uint8_t B:2;
};

union uFlags {
    uint8_t B;
    struct Flags F;
};

int
main()
{
    union uFlags u = {
        .F = { .V = 1 }
    };
    uint8_t f_as_byte = u.B;
}
Instead of casting the structure as a (uint8_t), you should take its address and cast that to a (uint8_t *):
#include <stdio.h>
#include <stdint.h>   /* for uint8_t */

struct Flags {
    uint8_t C: 1;
    uint8_t Z: 1;
    uint8_t I: 1;
    uint8_t D: 1;
    uint8_t V: 1;
    uint8_t N: 1;
    uint8_t B: 2;
};

int main() {
    struct Flags f = {0, 0, 0, 0, 1, 0, 0};
    uint8_t f_as_byte = *(uint8_t *)&f;
    printf("flags: %.2X\n", f_as_byte);
    return 0;
}
Note however that the actual bit positions used to represent the bit-field members are implementation-defined. The C Standard does not even guarantee that the Flags structure fits in a single byte. This makes bit-fields a risky choice for representing hardware registers or instruction opcodes, as it would make the emulator non-portable. Little-endian and big-endian systems typically use different layouts for bit-field members.

Creating a BMP file in C

I am trying to create a .bmp file (filled with one colour for testing purposes).
Here is the code that I'm using:
#include <stdio.h>

#define BI_RGB 0

typedef unsigned int UINT;
typedef unsigned long DWORD;
typedef long int LONG;
typedef unsigned short WORD;
typedef unsigned char BYTE;

typedef struct tagBITMAPFILEHEADER {
    UINT bfType;
    DWORD bfSize;
    UINT bfReserved1;
    UINT bfReserved2;
    DWORD bfOffBits;
} BITMAPFILEHEADER;

typedef struct tagBITMAPINFOHEADER {
    DWORD biSize;
    LONG biWidth;
    LONG biHeight;
    WORD biPlanes;
    WORD biBitCount;
    DWORD biCompression;
    DWORD biSizeImage;
    LONG biXPelsPerMeter;
    LONG biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} BITMAPINFOHEADER;

typedef struct COLORREF_RGB
{
    BYTE cRed;
    BYTE cGreen;
    BYTE cBlue;
} COLORREF_RGB;

int main(int argc, char const *argv[])
{
    BITMAPINFOHEADER bih;
    bih.biSize = sizeof(BITMAPINFOHEADER);
    bih.biWidth = 600;
    bih.biHeight = 600;
    bih.biSizeImage = bih.biWidth * bih.biHeight * 3;
    bih.biPlanes = 1;
    bih.biBitCount = 24;
    bih.biCompression = BI_RGB;
    bih.biXPelsPerMeter = 2835;
    bih.biYPelsPerMeter = 2835;
    bih.biClrUsed = 0;
    bih.biClrImportant = 0;

    COLORREF_RGB rgb;
    rgb.cRed = 0;
    rgb.cGreen = 0;
    rgb.cBlue = 0;

    BITMAPFILEHEADER bfh;
    bfh.bfType = 0x424D;
    bfh.bfReserved1 = 0;
    bfh.bfReserved2 = 0;
    bfh.bfOffBits = sizeof(BITMAPFILEHEADER) + bih.biSize;
    bfh.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
                 bih.biWidth * bih.biHeight * 4;

    FILE *f;
    f = fopen("test.bmp","wb");
    fwrite(&bfh, sizeof(BITMAPFILEHEADER), 1, f);
    fwrite(&bih, sizeof(BITMAPINFOHEADER), 1, f);

    int i,j;
    for(i = 0; i < bih.biHeight; i++)
    {
        for(j = 0; j < bih.biWidth; j++)
        {
            fwrite(&rgb,sizeof(COLORREF_RGB),1,f);
        }
    }
    fclose(f);
    return 0;
}
and yet every time I compile and run it, I get an error saying that it's not a valid BMP image. I double-checked all the values against multiple references and still can't find the error here.
Did I misunderstand something? What am I doing wrong here?
Also, not sure if it's important, but I am using Ubuntu 14.04 to compile this.
EDIT
Found one more issue:
bfh.bfType = 0x424D;
should be
bfh.bfType = 0x4D42;
But I still can't see the image.
First of all, you set bfSize ("Specifies the size of the file, in bytes.") to an invalid value: your code produced 1440112, while the size of the file is actually 1080112. It should be:
bfh.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
             bih.biWidth * bih.biHeight * sizeof(COLORREF_RGB);
because sizeof(COLORREF_RGB) is actually 3, not the 4 you hard-coded.
Another mistake is the sizes of your structs and types:

                                 // expected size   actual size*
typedef unsigned int UINT;       //      2               4
typedef unsigned long DWORD;     //      4               8
typedef long int LONG;           //      4               8
typedef unsigned short WORD;     //      2               2
typedef unsigned char BYTE;      //      1               1

* I'm using gcc on the x86_64 architecture
Your offsets just don't match the offsets on Wikipedia; the reference you are using was probably written for a 16-bit compiler on a 16-bit OS (as pointed out by cup in a comment), so it assumes int is a 2-byte type.
Using the types from stdint.h worked for me (a guide on stdint.h for Visual Studio is here):
#include <stdint.h>
typedef uint16_t UINT;
typedef uint32_t DWORD;
typedef int32_t LONG;
typedef uint16_t WORD;
typedef uint8_t BYTE;
And last but not least, you have to turn off memory alignment, as suggested by Weather Vane.
I believe your field sizes are incorrect; try this:
#pragma pack(push, 1)
typedef struct tagBITMAPFILEHEADER {
    WORD bfType;
    DWORD bfSize;
    WORD bfReserved1;
    WORD bfReserved2;
    DWORD bfOffBits;
} BITMAPFILEHEADER;
#pragma pack(pop)
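If your compiler supports C11, you can have it confirm the packed layout at compile time - a sketch, assuming the stdint.h-based typedefs from the previous answer:

_Static_assert(sizeof(BITMAPFILEHEADER) == 14,
               "BITMAPFILEHEADER must be exactly 14 bytes");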
You are trying to map a C structure onto some externally-defined binary format. There are a number of problems with this:
Size
typedef unsigned int UINT;
typedef unsigned long DWORD;
typedef long int LONG;
typedef unsigned short WORD;
typedef unsigned char BYTE;
The C language only specifies the minimum sizes of these types. Their actual sizes can and do vary between different compilers and operating systems, so the sizes and offsets of the members in your structures may not be what you expect. The only size you can rely on (on almost any system you are likely to encounter these days) is char being 8 bits.
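You can check what your own platform does with a one-liner (a sketch, assuming the typedefs above are in scope):

#include <stdio.h>

int main(void)
{
    /* On x86_64 gcc this prints 4 8 8 2 1, not the 2 4 4 2 1 the format needs */
    printf("%zu %zu %zu %zu %zu\n", sizeof(UINT), sizeof(DWORD),
           sizeof(LONG), sizeof(WORD), sizeof(BYTE));
    return 0;
}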
Padding
typedef struct tagBITMAPFILEHEADER {
    UINT bfType;
    DWORD bfSize;
    UINT bfReserved1;
    UINT bfReserved2;
    DWORD bfOffBits;
} BITMAPFILEHEADER;
It is common for DWORD to be twice as large as a UINT, and in fact your program depends on it. This usually means that the compiler will introduce padding between bfType and bfSize to give the latter appropriate alignment for its type. The .bmp format has no such padding.
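You can observe the padding directly with offsetof - a small sketch using the structure above:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    /* Any gap between consecutive member offsets is compiler-inserted padding */
    printf("bfType at offset %zu\n", offsetof(BITMAPFILEHEADER, bfType));
    printf("bfSize at offset %zu\n", offsetof(BITMAPFILEHEADER, bfSize));
    printf("total size %zu\n", sizeof(BITMAPFILEHEADER));
    return 0;
}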
Order
BITMAPFILEHEADER bfh;
bfh.bfType = 0x424D;
...
fwrite(&bfh, sizeof(BITMAPFILEHEADER), 1, f);
Another problem is that the C language does not specify the endianness of the types. The bfType member may be stored as [42][4D] (big-endian) or [4D][42] (little-endian) in memory. The .bmp format specifically requires the values to be stored in little-endian order.
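A quick way to check which order your machine uses (a sketch; the answer depends on the CPU, not on the .bmp format):

#include <stdio.h>

int main(void)
{
    unsigned int x = 0x424D;                /* the 'BM' magic value */
    unsigned char *p = (unsigned char *)&x; /* look at the first byte in memory */

    printf(p[0] == 0x4D ? "little-endian\n" : "big-endian\n");
    return 0;
}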
Solution
You might be able to solve some of these problems by using compiler-specific extensions (such as #pragmas or compiler switches), but probably not all of them.
The only way to properly write an externally-defined binary format is to use an array of unsigned char and write the values a byte at a time. Personally, I would write a set of helper functions for specific types:
void w32BE (unsigned char *p, unsigned long ul)
{
    p[0] = (ul >> 24) & 0xff;
    p[1] = (ul >> 16) & 0xff;
    p[2] = (ul >>  8) & 0xff;
    p[3] = (ul      ) & 0xff;
}

void w32LE (unsigned char *p, unsigned long ul)
{
    p[0] = (ul      ) & 0xff;
    p[1] = (ul >>  8) & 0xff;
    p[2] = (ul >> 16) & 0xff;
    p[3] = (ul >> 24) & 0xff;
}
/* And so on */
and some functions for writing the entire .bmp file or sections of it:
int write_header (FILE *f, BITMAPFILEHEADER bfh)
{
    unsigned char buf[14];

    w16LE (buf   , bfh.bfType);
    w32LE (buf+ 2, bfh.bfSize);
    w16LE (buf+ 6, bfh.bfReserved1);
    w16LE (buf+ 8, bfh.bfReserved2);
    w32LE (buf+10, bfh.bfOffBits);
    return fwrite (buf, sizeof buf, 1, f);
}
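The 16-bit little-endian helper implied by "And so on" above follows the same pattern:

void w16LE (unsigned char *p, unsigned int us)
{
    p[0] = (us     ) & 0xff;
    p[1] = (us >> 8) & 0xff;
}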

How to put a hex value into a 24-bit struct

So i have 2 structs:
struct cmd {
    uint8_t a;
    uint8_t b;
    uint8_t c;
};

typedef struct someName {
    uint8_t size;
    struct cmd cmdID;
} someName_t;
And I have res[0] containing the string "0xabc".
This 0xabc needs to be put inside the cmd struct.
The problem is that 0xabc is 12 bits (1010 1011 1100), so if I put it into a struct with only uint8_t a and uint8_t b it will work, because it will "fit" into 16 bits. But I have uint8_t a, uint8_t b and uint8_t c, so 24 bits, and that is my problem.
I tried:
someName_t msg;
sscanf(res[0], "0x%x", &(msg.cmdID));
But this does not work. It does work, however, if I remove the uint8_t c variable from the struct, because the value then fits inside the remaining 16 bits.
So how can I get the value 0xabc into this (24-bit) struct without adjusting the struct?
If you have control of your input format, i.e. you can guarantee it will always be something like 0xabc, then you can try:
const char input[] = "0xabc";
uint32_t tmp;
sscanf(input, "0x%x", &tmp);
struct cmd cmdid;
cmdid.a = (tmp & 0xFF0000U) >> 16;
cmdid.b = (tmp & 0xFF00U) >> 8;
cmdid.c = (tmp & 0xFFU);
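Reassembling the three bytes is a quick sanity check of the split (assuming <stdio.h>; for the "0xabc" input this prints 0xabc):

uint32_t check = ((uint32_t)cmdid.a << 16) |
                 ((uint32_t)cmdid.b <<  8) |
                  (uint32_t)cmdid.c;
printf("0x%x\n", (unsigned)check);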
You could try changing struct cmd to:
struct cmd {
    unsigned int c:4;
    unsigned int b:4;
    unsigned int a:4;
};
But YMMV depending on the compiler. It works OK on MSVC++ 2010.

Trouble with Writing BMP file in C

I am working on inputting a BMP, manipulating nearly every pixel in my 2D array pixelarray, and storing the result in the 2D array pixelarrayout (which is used inside the first nested for loop in the code segment below). I feel that the outputted BMP should be functional, and everything seems to be alright when looking at the file through a hex editor, but for some reason the file cannot open properly. Any ideas why? Possibly an error in the header, but the first 54 bytes (the size of both headers) of each file are the same.
//make an instance of all output structures
HEADER dataoutput;
INFOHEADER data2output;
char pixeloutput[pixelnum];

int pixcount = 0;
for(r = 0; r < rows; r++){
    for(c = 0; c < collumns; c++){
        pixeloutput[pixcount] = pixelarrayout[r][c].Red;
        pixcount++;
    }
}

dataoutput.Type = data.Type;
dataoutput.Size = data.Size;
dataoutput.Reserved1 = data.Reserved1;
dataoutput.Reserved2 = data.Reserved2;
dataoutput.Offset = data.Offset;

data2output.Size = data2.Size;
data2output.Width = data2.Width;
data2output.Height = data2.Height;
data2output.Planes = data2.Planes;
data2output.Bits = data2.Bits;
data2output.Compression = data2.Compression;
data2output.ImageSize = data2.ImageSize;
data2output.xResolution = data2.xResolution;
data2output.yResolution = data2.yResolution;
data2output.Colors = 0;
data2output.ImportantColors = 0;

if( !(fileout = fopen( "CUoutput4.bmp","wb")))return 0;

//calloc an array of 16 bytes set to NULL to be used to pad the end of the file
char *nullarray;
nullarray = (char*) calloc(16, sizeof(char));

fwrite(&dataoutput, sizeof(HEADER), 1, fileout);
fwrite(&data2output, sizeof(INFOHEADER), 1, fileout);

int i;
char *pixelvaluehold;
for(i = 0; i < pixelnum; i++){
    pixelvaluehold = &pixeloutput[i];
    fwrite(pixelvaluehold, sizeof(char), 1, fileout);
}

int count = 0;
while(( count + sizeof(HEADER) + sizeof(INFOHEADER) + pixelnum * sizeof(PIXEL) ) % 16 != 0){
    fwrite(nullarray, sizeof(char), 1, fileout);
    count++;
}

fclose(file);
fclose(fileout);
free(pixelarray);
free(pixelarrayout);
And here are the structs for HEADER, INFOHEADER, and PIXEL:
//define structures
typedef struct
{
    unsigned short int Type;        /* Magic identifier */
    unsigned int Size;              /* File size in bytes */
    unsigned short int Reserved1, Reserved2;
    unsigned int Offset;            /* Offset to data (in B) */
} HEADER;                           /* -- 14 Bytes -- */

typedef struct
{
    unsigned int Size;              /* Header size in bytes */
    unsigned int Width, Height;     /* Width / Height of image */
    unsigned short int Planes;      /* Number of colour planes */
    unsigned short int Bits;        /* Bits per pixel */
    unsigned int Compression;       /* Compression type */
    unsigned int ImageSize;         /* Image size in bytes */
    int xResolution, yResolution;   /* Pixels per meter */
    unsigned int Colors;            /* Number of colors */
    unsigned int ImportantColors;   /* Important colors */
} INFOHEADER;                       /* -- 40 Bytes -- */

//restore original struct packing alignment
#pragma pack(pop)

typedef struct
{
    unsigned char Red, Blue, Green;
} PIXEL;

Kernel project DES encryption

In a kernel-level project I want to encrypt a packet using the DES algorithm, and I need to use the function
static int des_setkey(void *ctx, const u8 *key,
                      unsigned int keylen,
                      u32 *flags)
I want to know how the key is to be passed to the function and which value to use as the key; it's a pointer type, so it will be an address value.
http://lxr.kelp.or.kr/ident?v=2.6.28;i=des_setkey
/*
 * Cryptographic API.
 *
 * s390 implementation of the DES Cipher Algorithm.
 *
 * Copyright IBM Corp. 2003,2007
 * Author(s): Thomas Spatzier
 *            Jan Glauber (jan.glauber#de.ibm.com)
 */
typeA A=xy;
functionname(typeA* a, typeA b)
--> functionname(&A,A);
crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags)
{
    u32 n, w;

    n  = parity[key[0]]; n <<= 4;
    n |= parity[key[1]]; n <<= 4;
    n |= parity[key[2]]; n <<= 4;
    n |= parity[key[3]]; n <<= 4;
    n |= parity[key[4]]; n <<= 4;
    n |= parity[key[5]]; n <<= 4;
    n |= parity[key[6]]; n <<= 4;
    n |= parity[key[7]];
    w = 0x88888888L;
    if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY)
        && !((n - (w >> 3)) & w)) { /* 1 in
-->CRYPTO_TFM_REQ_WEAK_KEY is a flag
--> source/include/linux/crypto.h
/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
source/arch/s390/crypto/des_s390.c
#define DES_EXPKEY_WORDS 32
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef signed char s8;
typedef short s16;
typedef int s32;
typedef long long s64;
static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
{
    return setkey(((struct des_ctx *)ctx)->expkey, key, keylen, flags);
}

struct des_ctx {
    u32 expkey[DES_EXPKEY_WORDS];
};
This looks like it is from the 2.6.16(-ish) Linux kernel. Later kernels have seen the crypto-system API rewritten slightly.
For this kernel, this might help:
*key is a pointer to an 8-byte array ( unsigned char[8] ) - note that the parity loop in crypto_des_check_key above reads exactly key[0] through key[7].
*ctx is a pointer to a struct des_ctx structure;
*flags is a pointer to a 32-bit unsigned int that will have CRYPTO_TFM_RES_WEAK_KEY set if you have supplied a weak key (one that fails FIPS 74 - see the ekey() function).
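Putting that together, a usage sketch (des_setkey, struct des_ctx, u8 and u32 are the kernel symbols quoted above; the key bytes here are an arbitrary example, not a recommended key):

struct des_ctx ctx;
u8 key[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };
u32 flags = 0;

if (des_setkey(&ctx, key, sizeof(key), &flags) != 0) {
    /* key was rejected, e.g. a weak key when CRYPTO_TFM_REQ_WEAK_KEY is set */
}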
