I basically want to process a struct array in a function in a dynamic library, but when I pass the array and print it (in exactly the same manner as in my main program) it shows different values.
Consider a struct like this:
struct color {
uint8_t b;
uint8_t g;
uint8_t r;
uint8_t a;
};
And the code to print it looks like this:
printf("pos: %p\n", array);
for (i = 0; i < size; i++) {
printf("bgra: %08x\n", ((uint32_t *) array)[i]);
}
Now, what I'm doing in the test program is this:
printf("Table:\n");
print(table, size);
and the output looks like this (as expected):
pos: 0x7fff5b359530
bgra: 00000000
bgra: ff0000ff
bgra: ff00ffff
But when I execute the same code in a function in the library, this is what I get:
pos: 0x7fff5b359530
bgra: 00000008
bgra: 00000030
bgra: 5b3598e0
Now I'm wondering what I'm doing wrong, since I can't see a fault in my code. Also, the values must correlate somehow, since the output is always the same (except for the address, of course).
header.h
#include <stdint.h>
#ifndef __HEADER_H_
#define __HEADER_H_
struct bmpt_color_bgra {
uint8_t b;
uint8_t g;
uint8_t r;
uint8_t a;
};
void print(struct bmpt_color_bgra *table, uint8_t size);
uint8_t *gen(struct bmpt_color_bgra *table, uint8_t size);
#endif
library.c
#include <stdlib.h>
#include <stdio.h>
#include "header.h"
#define EXPORT __attribute__((visibility("default")))
__attribute__((constructor))
static void initializer(void) {
printf("[%s] initializer()\n", __FILE__);
}
__attribute__((destructor))
static void finalizer(void) {
printf("[%s] finalizer()\n", __FILE__);
}
EXPORT
void print(struct bmpt_color_bgra *table, uint8_t size) {
uint8_t i;
printf("pos: %p\n", table);
for (i = 0; i < size; i++) {
printf("bgra: %08x\n", ((uint32_t *) table)[i]);
}
}
EXPORT
uint8_t *gen(struct bmpt_color_bgra *table, uint8_t size) {
printf("table in func:\n");
print(table, size);
}
test.c
#include <stdio.h>
#include <stdlib.h>
#include "header.h"
int main(int argc, char **argv) {
struct bmpt_color_bgra arr[3];
struct bmpt_color_bgra c;
c.b = 0x0;
c.g = 0x0;
c.r = 0x0;
c.a = 0x0;
arr[0] = c;
c.b = 0xff;
c.a = 0xff;
arr[1] = c;
c.r = 0xff;
arr[2] = c;
//the first result (the correct one)
print(arr, 3);
//the second result
gen(arr, 3);
}
This probably comes down to memory alignment of the members within the struct, and the size of the struct itself differing between your program and the dynamic/shared library. You don't mention which compiler you are using, but using different compiler(s) or compiler options for your program and the shared library could cause this effect.
You can preserve binary compatibility between modules by specifying exactly how the members of the struct should be aligned. E.g. in GCC you can force how the struct is represented in memory by using an attribute.
See https://gcc.gnu.org/onlinedocs/gcc-3.3/gcc/Type-Attributes.html for GCC alignment instructions
struct bmpt_color_bgra {
uint8_t b;
uint8_t g;
uint8_t r;
uint8_t a;
} __attribute__ ((packed));
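Regardless of which alignment approach you pick, a compile-time size check in the shared header can catch a mismatch early. A minimal sketch, assuming C11 and that 4 bytes is the intended size (neither is stated in the original code):

#include "header.h"

/* C11 static assertion: the build fails if the struct is not 4 bytes,
   e.g. because packing or alignment options differ between the program
   and the library. */
_Static_assert(sizeof(struct bmpt_color_bgra) == 4,
               "bmpt_color_bgra must be exactly 4 bytes");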
Also take a look at Byte Alignment for integer (or other) types in a uint8_t array for a similar question.
Related
I have code which looks as shown below. The code here is for word_size = 64; I need similar versions for 32 and 16 too. I can't find a way to reuse the same encrypt function for all the sizes. Moreover, I also need to declare the variables according to word_size, i.e. to use uint16_t, uint32_t or uint64_t depending on word_size. Can you help me write reusable code for this case?
#include <stdio.h>
#include <stdint.h>
void encrypt(uint64_t* , uint64_t*, uint64_t*);
int main(){
int block_size;
// Get the user inputs
printf("input the block size: \n");
scanf("%d", &block_size); // can be 32, 64 or 128
int word_size = block_size/2; // 16,32 or 64
// Depending on the word_size, I should declare the variables with
// corresponding width
uint64_t plain_text[2] = {0,0};
uint64_t cipher_text[2] = {0,0};
uint64_t key_text[2] = {0,0};
uint64_t *pt = plain_text, *ct = cipher_text, *k = key_text;
encrypt(pt, ct, k);
}
/*
 * Encryption Method
 */
void encrypt(uint64_t* pt, uint64_t* ct, uint64_t* k){
// Involves a bit-shifting algorithm which works only on exact sizes, i.e. either 16, 32 or 64.
}
I can provide more information if needed.
There is a way to do this in C - by using a struct and a union:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
enum type {
U64,
U32,
U16,
U8,
};
struct container {
enum type type;
union {
uint64_t u64;
uint32_t u32;
uint16_t u16;
uint8_t u8;
} value;
};
int test(struct container container) {
switch(container.type) {
case U64:
printf("Value is :%" PRIu64 "\n", container.value);
break;
case U32:
printf("Value is :%" PRIu32 "\n", container.value);
break;
case U16:
printf("Value is :%" PRIu16 "\n", container.value);
break;
case U8:
printf("Value is :%" PRIu8 "\n", container.value);
break;
}
return 0;
}
int main(int argc, char **argv) {
struct container c1, c2;
c1.type = U64;
c1.value.u64 = 10000000000ULL;
c2.type = U8;
c2.value.u8 = 100;
test(c1);
test(c2);
return 0;
}
The output produced was:
Value is :10000000000
Value is :100
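Applied to your encrypt case, the same idea could look roughly like this (a sketch only: the names word_size, struct word and the XOR placeholder are assumptions, and the real cipher rounds are left out):

#include <stdint.h>

enum word_size { W16, W32, W64 };

struct word {
    enum word_size size;
    union {
        uint16_t u16;
        uint32_t u32;
        uint64_t u64;
    } v;
};

/* One encrypt entry point that dispatches on the word size; each branch
   then works on the exact-width member, so the shifts behave as for a
   16-, 32- or 64-bit word. */
void encrypt(struct word *pt, struct word *ct, struct word *k) {
    ct->size = pt->size;
    switch (pt->size) {
    case W16: ct->v.u16 = pt->v.u16 ^ k->v.u16; break; /* placeholder round */
    case W32: ct->v.u32 = pt->v.u32 ^ k->v.u32; break; /* placeholder round */
    case W64: ct->v.u64 = pt->v.u64 ^ k->v.u64; break; /* placeholder round */
    }
}

The caller sets the size member once from the user's block_size and the rest of the code stays identical for all three widths.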
I am not receiving anything in the buffer. Wherever I printf my buffer, it is always empty or shows garbage values. Can anyone help?
I defined the header and the packet and called them in my main, but the buffer still shows garbage.
#include <stdint.h>
struct header {
uint16_t f1;
uint16_t f2;
uint32_t f3;
};
struct data {
uint16_t pf1;
uint64_t pf2;
};
#include <arpa/inet.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include "packet.h"
void htonHeader(struct header h, char buffer[8]) {
uint16_t u16;
uint32_t u32;
u16 = htons(h.f1);
memcpy(buffer+0, &u16, 2);
printf("Value of buff is: %hu\n",buffer);
u16 = htons(h.f2);
memcpy(buffer+2, &u16, 2);
u32 = htonl(h.f3);
memcpy(buffer+4, &u32, 4);
}
void htonData(struct data d, char buffer[10]) {
uint16_t u16;
uint32_t u32;
u16 = htons(d.pf1);
memcpy(buffer+0, &u16, 2);
u32 = htonl(d.pf2>>32);
memcpy(buffer+2, &u32, 4);
u32 = htonl(d.pf2);
memcpy(buffer+6,&u32, 4);
}
void HeaderData(struct header h, struct data d, char buffer[18]) {
htonHeader(h, buffer+0);
htonData(d, buffer+8);
printf("buff is: %s\n",buffer);
}
#include <stdio.h>
#include "packet.c"
#include <string.h>
#include<stdlib.h>
int main(){
struct header h;
struct data d;
char buff[18];
//printf("Packet is: %s\n",buff);
printf("Generating Packets..... \n");
h.f1=1;
d.pf1=2;
h.f2=3;
d.pf2=4;
h.f3=5;
HeaderData(h,d,buff);
strcat(buff,buff+8);
printf("Packet is: %s\n",buff);
return 0;
}
The problem is that your printf()s are either wrong (printf( "%hu", ... ); expects an unsigned short as parameter, but you pass a pointer) or you try to print buff using "%s" even though its content is binary, not text. What you could do instead is some kind of hexdump, like:
int i;
for( i=0; i<sizeof( buff ); i++ ) {
printf( "%x ", buff[i] & 0xff );
}
puts( "" ); // terminate the line
Please note that using sizeof like this works in main() only; in the other functions you've got to determine the buffer size differently.
Besides: because of the binary content of buff, you can't use strcat(). Even if you have made sure that there is a '\0' behind the last value you have copied (I haven't checked whether you have), depending on the integer values you copy there may be another '\0' value before that one, and strcat() would overwrite everything from that point on.
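As a follow-up to the note about sizeof: a small helper that takes the length explicitly can be called from any function. A minimal sketch (the name hexdump is made up for illustration):

#include <stdio.h>
#include <stddef.h>

/* Dump len bytes of buf as hex; the length is passed in explicitly,
   so it works no matter where the buffer was defined. */
static void hexdump(const char *buf, size_t len) {
    size_t i;
    for (i = 0; i < len; i++) {
        printf("%02x ", buf[i] & 0xff);
    }
    puts("");
}

Inside HeaderData() you could then call, for example, hexdump(buffer, 18).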
I am trying to use a packed struct with the tcc C compiler.
The code is as follows, and the __attribute__ tag should be supported:
#include <stdio.h>
#include <stdint.h>
typedef struct _test_t{
char c;
uint16_t i;
char d;
} __attribute__((__packed__)) test_t;
int main(){
test_t x;
x.c = 0xCC;
x.i = 0xAABB;
x.d = 0xDD;
const char *s = (const char *) & x;
unsigned i;
for(i = 0; i < sizeof(x); i++)
printf("%3u %x\n", i, 0xFF & s[i]);
return 0;
}
It works in gcc, but does not work with tcc.
I also tried __attribute__((packed)) and a few other variants - none of them works.
As you already found, in tcc the __attribute__ extension applies only to a struct's members, so each of them should have it applied individually. Here is your code with minor adaptations; it compiles with tcc 0.9.26 and runs with the correct output:
typedef struct {
char c __attribute__((packed));
unsigned short i __attribute__((packed));
char d __attribute__((packed));
} test_t;
int main(void)
{
test_t x;
printf("%zu\n", sizeof(test_t));
x.c = 0xCC;
x.i = 0xAABB;
x.d = 0xDD;
const char *s = (const char *) &x;
unsigned i;
for (i = 0; i < sizeof(x); i++)
printf("%3u %x\n", i, 0xFF & s[i]);
return 0;
}
Result:
4
0 cc
1 bb
2 aa
3 dd
There is one catch here. As you may have already spotted, there are no headers. Correctly written code should have:
#include <stdio.h>
#include <stdint.h> // then replace unsigned short with uint16_t
However, with the headers included the __attribute__ no longer works. I am not sure if that always happens, but on my system (CentOS 6) it behaves exactly that way.
As I found, the explanation lies in the internal sys/cdefs.h header, which contains:
/* GCC has various useful declarations that can be made with the
`__attribute__' syntax. All of the ways we use this do fine if
they are omitted for compilers that don't understand it. */
#if !defined __GNUC__ || __GNUC__ < 2
# define __attribute__(xyz) /* Ignore */
#endif
so the __attribute__ function-like macro is defined away for tcc, as tcc does not define the __GNUC__ macro. It seems to be an inconsistency between the tcc developers and the standard library (here glibc) writers.
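Given that explanation, one possible workaround (only a sketch derived from the cdefs.h snippet above; I have not verified it beyond that reasoning) is to include the headers first and then remove the empty macro before declaring the packed struct:

#include <stdio.h>
#include <stdint.h>

/* sys/cdefs.h may have defined __attribute__(xyz) to nothing for
   non-GCC compilers; undefine it so tcc sees the real attribute again. */
#ifdef __TINYC__
#undef __attribute__
#endif

typedef struct {
    char c __attribute__((packed));
    uint16_t i __attribute__((packed));
    char d __attribute__((packed));
} test_t;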
I can confirm that at least with tcc 0.9.26 __attribute__((packed)) on struct members is not working. Using Windows-style packing pragmas works just fine:
#if defined(__TINYC__)
#pragma pack(1)
#endif
typedef struct {
uint16_t ..
} interrupt_gate_descriptor_t;
#if defined(__TINYC__)
#pragma pack()  /* restore the default packing */
#endif
Seems to be an error in TCC.
According to many sources, including this one, http://wiki.osdev.org/TCC, this should work:
struct some_struct {
unsigned char a __attribute__((packed));
unsigned char b __attribute__((packed));
} __attribute__((packed));
... but it does not work.
Say I have a C structure like:
typedef struct {
UINT8 nRow;
UINT8 nCol;
UINT16 nData;
} tempStruct;
Is there a way to put all of those 3 members of the struct into a single 32-bit word, yet still be able to access them individually?
Something with the help of unions?
typedef struct {
UINT8 nRow;
UINT8 nCol;
UINT16 nData;
} tempStruct;
typedef union {
tempStruct myStruct;
UINT32 myWord;
} stuff;
Or even better (with no "intermediate" struct):
#include <stdlib.h>
#include <stdio.h>
typedef union {
struct {
int nRow:8;
int nCol:8;
int nData:16;
};
int myWord;
} stuff;
int main(int args, char** argv){
stuff a;
a.myWord=0;
a.nCol=2;
printf("%d\n", a.myWord);
return 0;
}
What about just referring to it as a UINT32? It's not like C is type-safe.
tempStruct t;
t.nRow = 0x01;
t.nCol = 0x02;
t.nData = 0x04;
//put a reference to the struct as a pointer to a UINT32
UINT32* word = (UINT32 *) &t;
printf("%x", *word);
You can then get the value of the struct as a 32-bit word by dereferencing the pointer. The specifics of your system may matter, though: if I run this on my machine, the value of word is 0x00040201, that is, the fields are in reverse order. That's not necessarily going to be the case on another system, so if you're trying to serialize this elsewhere, it's not portable.
If you want to actually store it as a 32-bit integer and then refer to the fields individually, why not
UINT32 word = 0x01020004;
and then somewhere else...
/* take a pointer, so the result refers to the caller's word
   rather than to a local copy that vanishes when the function returns */
UINT8* row(UINT32 *word) {
return (UINT8 *) word + 3;
}
UINT8* col(UINT32 *word) {
return ((UINT8 *) word) + 2;
}
UINT16* data(UINT32 *word) {
return ((UINT16 *) word);
}
Macros will facilitate portable endianness.
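For instance, shift-based macros avoid the byte-order issue entirely, because they work on values rather than addresses. A sketch, assuming the layout of the 0x01020004 example (nRow in the top byte, nData in the low 16 bits); the macro names are made up:

/* Extract the fields by shifting: independent of the host byte order. */
#define WORD_ROW(w)   ((UINT8)(((w) >> 24) & 0xffu))
#define WORD_COL(w)   ((UINT8)(((w) >> 16) & 0xffu))
#define WORD_DATA(w)  ((UINT16)((w) & 0xffffu))

/* Build the word back up from the fields. */
#define MAKE_WORD(r, c, d) \
    (((UINT32)(r) << 24) | ((UINT32)(c) << 16) | ((UINT32)(d) & 0xffffu))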
Yes, you can use bit fields in C to do that. Something like:
typedef struct {
unsigned nRow : 8;
unsigned nCol : 8;
unsigned nData : 16;
} tempStruct;
If you also want to control the memory layout, you might want to take a look at #pragma pack, a non-portable option available on some compilers.
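For example (a sketch; the push/pop form is supported by MSVC, GCC and Clang, but check your compiler's documentation):

#pragma pack(push, 1)   /* 1-byte packing for the following struct */
typedef struct {
    UINT8 nRow;
    UINT8 nCol;
    UINT16 nData;
} tempStructPacked;
#pragma pack(pop)       /* restore the previous packing */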
typedef struct {
int nRow:8;
int nCol:8;
int nData:16;
} tempStruct;
nRow will take only 8 bits, nCol will take 8 bits, and nData will take 16 bits.
This will work for you.
I just wrote a sample program to see its size:
#include <stdio.h>
typedef struct {
int nRow:8;
int nCol:8;
int nData:16;
} tempStruct;
typedef struct {
int nRow;
int nCol;
int nData;
} tempStructZ;
int main(void) {
printf("%d\n", sizeof(tempStruct));
printf("%d\n", sizeof(tempStructZ));
return 0;
}
Output:
4
16
Is there a clean way of casting a struct into a uint64_t or any other integer type, given that the struct's size is <= the size of that integer type?
The only thing I can think of is an 'ok' solution at best - using unions. However, I have never been fond of them.
Let me add a code snippet to clarify:
typedef struct {
uint8_t field: 5;
uint8_t field2: 4;
/* and so on... */
}some_struct_t;
some_struct_t some_struct;
//init struct here
uint32_t reg; /* "register" is a C keyword, so the variable is named reg here */
Now how do I cast some_struct so that its bit layout is captured in the uint32_t reg?
Hope that makes it a bit clearer.
I've just hit the same problem, and I solved it with a union like this:
typedef union {
struct {
uint8_t field: 5;
uint8_t field2: 4;
/* and so on... */
} fields;
uint32_t bits;
} some_struct_t;
/* cast from uint32_t x */
some_struct_t mystruct = { .bits = x };
/* cast to uint32_t */
uint32_t x = mystruct.bits;
HTH,
Alex
A non-portable solution:
struct smallst {
int a;
char b;
};
void make_uint64_t(struct smallst *ps, uint64_t *pi) {
memcpy(pi, ps, sizeof(struct smallst));
}
You may face problems if you, for example, pack the struct on a little-endian machine and unpack it on a big-endian machine.
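A self-contained usage sketch of the above (the struct and the function are repeated so the snippet compiles on its own; the round trip back to the struct is only for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct smallst {
    int a;
    char b;
};

void make_uint64_t(struct smallst *ps, uint64_t *pi) {
    memcpy(pi, ps, sizeof(struct smallst));
}

int main(void) {
    struct smallst s = { 42, 'x' };
    uint64_t as_int = 0;

    make_uint64_t(&s, &as_int);   /* pack the struct into the integer */

    struct smallst back;
    memcpy(&back, &as_int, sizeof(struct smallst));   /* and unpack it */

    printf("a=%d b=%c\n", back.a, back.b);
    return 0;
}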
You can use pointers, and it is easy.
For example:
struct s {
int a:8;
int b:4;
int c:4;
int d:8;
int e:8;
} st;
/* st is a real object here; taking its address and casting it is
   what lets the whole 32-bit struct be read as a single int */
st.b = 0x8;
st.c = 1;
int *struct_as_int = (int *) &st;
Hope it helps.
You can cast the object's pointer to the desired type and then dereference it. I assume it can be a little bit slower than using unions or other approaches, but it does not require additional declarations and can be used in place.
Short answer:
*(uint16_t *)&my_struct
Example:
#include <stdio.h>
#include <stdint.h>
typedef struct {
uint8_t field1;
uint8_t field2;
} MyStruct;
int main() {
MyStruct my_struct = {0xFA, 0x7D};
uint16_t num_my_struct = *(uint16_t *)&my_struct;
printf("%X \n", num_my_struct); // 7DFA
return 0;
}