I have a byte array holding a 64-bit unsigned integer:
byte array[8] = { 0x01,0xc9,0x98,0x57,0xd1,0x47,0xf3,0x60 };
I want to convert it to decimal.
When I use the Windows calculator, the result is:
128801567297500000
I can't find a way to do this with the WinAPI or in plain C.
Any help is appreciated.
For a 4-byte array I use the working code below:
BYTE array[4] = { 0xC3,0x02,0x00,0x00 };
printf("Result : %d\n",(array[0]) | (array[1]) <<8 |(array[2]) <<16 | (array[3]) <<24 );
Result : 707
Cast the bytes to a 64-bit type before shifting. Currently they are implicitly promoted to int, which is a 32-bit type, so shifting by 32 or more is undefined.
Assuming you use <stdint.h>:
uint64_t result = ((uint64_t)b[0])
                | ((uint64_t)b[1] << 8)
                | ((uint64_t)b[2] << 16)
                | ((uint64_t)b[3] << 24)
                | ((uint64_t)b[4] << 32)
                | ((uint64_t)b[5] << 40)
                | ((uint64_t)b[6] << 48)
                | ((uint64_t)b[7] << 56);
or in reverse order (treating the array as big endian; this gives the result you're seeing in the Windows calculator):
uint64_t result = ((uint64_t)b[7])
                | ((uint64_t)b[6] << 8)
                | ((uint64_t)b[5] << 16)
                | ((uint64_t)b[4] << 24)
                | ((uint64_t)b[3] << 32)
                | ((uint64_t)b[2] << 40)
                | ((uint64_t)b[1] << 48)
                | ((uint64_t)b[0] << 56);
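For reference, a self-contained sketch (not part of the answer above) that builds the same big-endian result with a loop and prints it with PRIu64, since %d expects a plain int:
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint8_t b[8] = { 0x01, 0xc9, 0x98, 0x57, 0xd1, 0x47, 0xf3, 0x60 };
    uint64_t result = 0;
    /* fold the bytes in, most significant first (b[0] is the MSB) */
    for (int i = 0; i < 8; i++)
        result = (result << 8) | b[i];
    printf("%" PRIu64 "\n", result); /* prints 128801567297500000 */
    return 0;
}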
Well, you can use
sprintf() to print the bytes as hex digits into a string, then
convert that string using strtoull() with base 16.
Sample code:
#include <stdio.h>
#include <stdlib.h>
#define SIZE 128
int main(void)
{
char array[8] = { 0x01,0xc9,0x98,0x57,0xd1,0x47,0xf3,0x60 };
char arr[SIZE] = {0};
int i = 0;
unsigned long long res = 0;
for (i = 0; i < 8; i++)
sprintf((arr + (i * 2)), "%02x", (array[i] & 0xff)); /* %02x zero-pads; plain %2x would insert spaces for bytes below 0x10 */
printf("arr is %s\n", arr);
res = strtoull(arr, NULL, 16); /* unsigned conversion, to match the %llu below */
printf("res is %llu\n", res);
return 0;
}
int i;
byte array[8] = { 0x01,0xc9,0x98,0x57,0xd1,0x47,0xf3,0x60 };
unsigned long long v;
// Reverse the byte order (change of endianness)
for (i = 0; i < 4; ++i) {
    byte temp = array[i];
    array[i] = array[7 - i];
    array[7 - i] = temp;
}
memcpy(&v, array, sizeof(v)); // not v = memcpy(...): memcpy returns a pointer, not the value
printf("%llu ", v);
I have the following:
payload.data[i].data = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
Note: payload.data[i].data is of type float.
Using this: printf("test:%X%X%X%X", buf[8], buf[9], buf[10], buf[11]);, I have confirmed that the buffer contains 0x42C78A3D, which is roughly 99.77 when read as a float.
This: printf("Float value:%f", payload.data[i].data); prints out 1120373248.00, which is the decimal value of 0x42C78A00.
It seems to me that for some reason, buf[11] is coming up empty.
Here is a more complete view of my code:
int i = 0;
int j = 0;
struct sensor_payload payload;
payload.key = (buf[0] << 8) | buf[1];
payload.id = (buf[2] << 8) | buf[3];
payload.type = (buf[4] << 8) | buf[5];
payload.fields = buf[6];
for(i = 0, j = 0; i < payload.fields; i++, j = j +33){
payload.data[i].data_type = buf[j+7];
payload.data[i].data = (buf[j+8] << 24) | (buf[j+9] << 16) | (buf[j+10] << 8) | (buf[j+11]);
slog(0, SLOG_DEBUG, "test:%X%X%X%X", buf[8], buf[9], buf[10], buf[11]);
}
payload.valid = true;
return payload;
and the definitions:
struct sensor_data{
uint8_t data_type;
float data;
};
struct sensor_payload{
uint16_t key, id, type;
uint8_t fields;
struct sensor_data data[4];
bool valid;
};
There's a lot in your code that is unclear. However, the only discrepancy I see is that you're putting index-offset entries into your data (notice the j+x pattern):
(buf[j+8] << 24) | (buf[j+9] << 16) | (buf[j+10] << 8) | (buf[j+11])
while printing non-offset entries:
slog(0, SLOG_DEBUG, "test:%X%X%X%X", buf[8], buf[9], buf[10], buf[11])
Technically, the data you're printing and the data you're storing in the float are only the same for i == 0.
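There is a second effect worth noting (an addition, not part of the answer above): assigning the assembled integer to a float converts the value arithmetically rather than reinterpreting the bits. 0x42C78A3D is 1120373309 as an integer; a float has only 24 bits of mantissa, so at that magnitude it rounds to a multiple of 128, giving 1120373248 (0x42C78A00), which is exactly the output observed and why the low byte looks "empty". A minimal sketch of a bit-level reinterpretation, with bytes_to_float as a hypothetical helper name:
#include <stdint.h>
#include <string.h>

/* Reassemble four big-endian bytes into an IEEE-754 bit pattern and
   reinterpret it as a float, instead of converting the integer value. */
static float bytes_to_float(const uint8_t *p)
{
    uint32_t bits = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
                  | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    float f;
    memcpy(&f, &bits, sizeof f); /* well-defined type pun in C */
    return f;
}
With that, payload.data[i].data = bytes_to_float(&buf[j+8]); would store roughly 99.77 rather than 1120373248.0.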
I have a matrix (2-D int pointer int **mat) that I am trying to write to a file in Linux in Little-endian convention.
Here is my function that writes to the file:
#define BUFF_SIZE 4
void write_matrix(int **mat, int n, char *dest_file) {
int i, j;
char buff[BUFF_SIZE];
int fd = open(dest_file, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR | S_IXUSR);
if (fd < 0) {
printf("Error: Could not open the file \"%s\".\n", dest_file);
}
buff[0] = (n & 0x000000ff);
buff[1] = (n & 0x0000ff00) >> 8;
buff[2] = (n & 0x00ff0000) >> 16;
buff[3] = (n & 0xff000000) >> 24;
write(fd, buff, BUFF_SIZE);
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
buff[0] = (mat[i][j] & 0x000000ff);
buff[1] = (mat[i][j] & 0x0000ff00) >> 8;
buff[2] = (mat[i][j] & 0x00ff0000) >> 16;
buff[3] = (mat[i][j] & 0xff000000) >> 24;
if (write(fd, buff, BUFF_SIZE) != BUFF_SIZE) {
close(fd);
printf("Error: could not write to file.\n");
return;
}
}
}
close(fd);
}
The problem is that when I write out a large enough matrix of the form mat[i][i] = i (say 512 x 512), I think I get an overflow, since I get weird negative numbers.
To convert back I use:
void read_matrix(int fd, int **mat, int n, char buff[]) {
int i, j;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
assert(read(fd, buff, BUFF_SIZE) == BUFF_SIZE);
mat[i][j] = byteToInt(buff);
}
}
}
int byteToInt(char buff[]) {
return (buff[3] << 24) | (buff[2] << 16) | (buff[1] << 8) | (buff[0]);
}
What am I doing wrong?
EDITED:
Added the read_matrix function.
It seems like I'm getting a short instead of an int, since 384 (binary 110000000) becomes -128 (binary 10000000).
Did a test, and found out that:
char c = 128;
int i = 0;
i |= c;
gives i = -128. Why????
The problem is in your input conversion:
int byteToInt(char buff[]) {
return (buff[3] << 24) | (buff[2] << 16) | (buff[1] << 8) | (buff[0]);
}
You don't mention which platform you are on, but on most common platforms char is signed. And that will cause problems. Suppose, for example, that buff[1] is 0x80 (0b10000000). Since it is a signed value, that is the code for the value -128. And since the shift operators start by doing integer promotions on both of their arguments, it will be converted to the integer -128 before the shift is performed; in other words, it will have the value 0xFFFFFF80, which becomes 0xFFFF8000 after the shift.
The bitwise logical operators (such as |) perform the usual arithmetic conversions before doing the bitwise operation; in the case of (buff[1] << 8) | (buff[0]), the left-hand operand will already be a signed int (because the type of << is the type of its promoted left-hand operand); the right-hand operand, an implicitly signed char, will also be promoted to a signed int, so again, if it were 0x80, it would end up sign-extended to 0xFFFFFF80.
In either case, the bitwise-or operation will end up with unwanted high-order 1 bits.
Explicitly casting buff[x] to an unsigned int won't help, because it will first be sign-extended to an int before being reinterpreted as an unsigned int. Instead, it is necessary to cast it to an unsigned char:
int byteToInt(char buff[]) {
return ((unsigned char)buff[3] << 24)
| ((unsigned char)buff[2] << 16)
| ((unsigned char)buff[1] << 8)
| (unsigned char)buff[0];
}
Since int may be 16-bit, it would be better to use long, and indeed it would be better to use unsigned long to avoid other conversion issues. That means doing a double cast:
unsigned long byteToInt(char buff[]) {
return ((unsigned long)(unsigned char)buff[3] << 24)
| ((unsigned long)(unsigned char)buff[2] << 16)
| ((unsigned long)(unsigned char)buff[1] << 8)
| (unsigned long)(unsigned char)buff[0];
}
What you have is undefined behaviour that is often overlooked: left-shifting negative signed values is undefined.
When you do this
int byteToInt(char buff[]) {
return (buff[3] << 24) | (buff[2] << 16) | (buff[1] << 8) | (buff[0]);
}
even if one element of buff holds a negative value (i.e., a data byte with its MSB set), you hit undefined behaviour. Since your data is binary, reading it as unsigned makes the most sense. You could use a standard type that makes the signedness and width explicit, such as uint8_t from <stdint.h>.
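As a sketch of that suggestion (byteToUint32 is a hypothetical replacement name): if the buffer is declared as uint8_t rather than char, the sign-extension problem disappears entirely, because uint8_t promotes to int with zero-filled high bits:
#include <stdint.h>

/* Assemble four little-endian bytes without any sign extension. */
uint32_t byteToUint32(const uint8_t buff[4])
{
    return ((uint32_t)buff[3] << 24)
         | ((uint32_t)buff[2] << 16)
         | ((uint32_t)buff[1] << 8)
         |  (uint32_t)buff[0];
}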
I need to make an assignment where I switch the values of a certain int. For example: 0xaabbccdd should be turned in to 0xddccbbaa.
I've already extracted all of the bytes from the given number, and their values are correct.
unsigned int input;
scanf("%i", &input);
unsigned int first_byte = (input >> (8*0)) & 0xff;
unsigned int second_byte = (input >> (8*1)) & 0xff;
unsigned int third_byte = (input >> (8*2)) & 0xff;
unsigned int fourth_byte = (input >> (8*3)) & 0xff;
Now I'm trying to set an empty int variable (i.e. 00000000 00000000 00000000 00000000) to those byte values, but in reverse order. So how can I say that the first byte of the empty variable is the fourth byte of the given input? I've been trying different combinations of bitwise operations, but I can't seem to wrap my head around it. I'm pretty sure I should be able to do something like:
answer *first byte* | fourth_byte;
I would appreciate any help, because I've been stuck and searching for an answer for a couple of hours now.
Based on your code :
#include <stdio.h>
int main(void)
{
unsigned int input = 0xaabbccdd;
unsigned int first_byte = (input >> (8*0)) & 0xff;
unsigned int second_byte = (input >> (8*1)) & 0xff;
unsigned int third_byte = (input >> (8*2)) & 0xff;
unsigned int fourth_byte = (input >> (8*3)) & 0xff;
printf(" 1st : %x\n 2nd : %x\n 3rd : %x\n 4th : %x\n",
first_byte,
second_byte,
third_byte,
fourth_byte);
unsigned int combo = first_byte<<8 | second_byte;
combo = combo << 8 | third_byte;
combo = combo << 8 | fourth_byte;
printf(" combo : %x ", combo);
return 0;
}
It will output 0xddccbbaa
Here's a more elegant function to do this :
unsigned int setByte(unsigned int input, unsigned char byte, unsigned int position)
{
    if (position > sizeof(unsigned int) - 1)
        return input;
    unsigned int orbyte = byte; /* widen first, so the shift below happens on unsigned int */
    input |= orbyte << (position * 8);
    return input;
}
Usage :
unsigned int combo = 0;
combo = setByte(combo, first_byte, 3);
combo = setByte(combo, second_byte, 2);
combo = setByte(combo, third_byte, 1);
combo = setByte(combo, fourth_byte, 0);
printf(" combo : %x ", combo);
unsigned int result;
result = (first_byte << (8*3)) | (second_byte << (8*2)) | (third_byte << (8*1)) | fourth_byte;
You can extract the bytes and put them back in order as you're trying, that's a perfectly valid approach. But here are some other possibilities:
bswap, if you have access to it. It's an x86 instruction that does exactly this. It doesn't get any simpler. Similar instructions may exist on other platforms. Probably not good for a C assignment though.
Or, swapping adjacent "fields". If you have AABBCCDD and first swap adjacent 8-bit groups (get BBAADDCC), and then swap adjacent 16-bit groups, you get DDCCBBAA as desired. This can be implemented, for example: (not tested)
x = ((x & 0x00FF00FF) << 8) | ((x >> 8) & 0x00FF00FF);
x = ((x & 0x0000FFFF) << 16) | ((x >> 16) & 0x0000FFFF);
Or, a closely related method but with rotates. In AABBCCDD, AA and CC are both rotated to the left by 8 positions, and BB and DD are both rotated right by 8 positions. So you get:
x = rol(x & 0xFF00FF00, 8) | ror(x & 0x00FF00FF, 8);
This requires rotates however, which most high level languages don't provide, and emulating them with two shifts and an OR negates their advantage.
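For reference, a sketch of rol/ror in portable C (an addition; assumes 32-bit values and 0 < n < 32). As the paragraph above notes, this emulation may negate the advantage unless the compiler recognizes the idiom and emits a single rotate instruction:
#include <stdint.h>

static inline uint32_t rol(uint32_t x, unsigned n) /* rotate left */
{
    return (x << n) | (x >> (32 - n));
}

static inline uint32_t ror(uint32_t x, unsigned n) /* rotate right */
{
    return (x >> n) | (x << (32 - n));
}

/* then: x = rol(x & 0xFF00FF00, 8) | ror(x & 0x00FF00FF, 8); */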
#include <stdio.h>
int main(void)
{
unsigned int input = 0xaabbccdd,
byte[4] = {0},
n = 0,
output = 0;
do
{
byte[n] = (input >> (8*n)) & 0xff;
n = n + 1;
}while(n < 4);
n = 0;
do
{
printf(" %d : %x\n", byte[n]);
n = n + 1;
}while (n < 4);
n = 0;
do
{
output = output << 8 | byte[n];
n = n + 1;
}while (n < 4);
printf(" output : %x ", output );
return 0;
}
You should try to avoid repeating code.
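For instance, a condensed sketch of the same idea (an addition), extracting and reassembling in a single pass:
#include <stdio.h>

int main(void)
{
    unsigned int input = 0xaabbccdd, output = 0;
    int n;
    /* peel bytes off the low end of input, push them onto the low end of output */
    for (n = 0; n < 4; n++)
        output = (output << 8) | ((input >> (8 * n)) & 0xff);
    printf(" output : %x ", output); /* prints ddccbbaa */
    return 0;
}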
How do I go about avoiding the compiler warning (warning: cast increases required alignment of target type) in the following code?
static int fill_color24 (VisVideo *video, VisColor *color)
{
int x, y;
uint32_t *buf;
uint8_t *rbuf = visual_video_get_pixels (video);
uint8_t *buf8;
int32_t cola =
(color->b << 24) |
(color->g << 16) |
(color->r << 8) |
(color->b);
int32_t colb =
(color->g << 24) |
(color->r << 16) |
(color->b << 8) |
(color->g);
int32_t colc =
(color->r << 24) |
(color->b << 16) |
(color->g << 8) |
(color->r);
for (y = 0; y < video->height; y++) {
buf = (uint32_t *) rbuf; // warning is for this line
for (x = video->width; x >= video->bpp; x -= video->bpp) {
*(buf++) = cola;
*(buf++) = colb;
*(buf++) = colc;
}
buf8 = (uint8_t *) buf;
*(buf8++) = color->b;
*(buf8++) = color->g;
*(buf8++) = color->r;
rbuf += video->pitch;
}
return VISUAL_OK;
}
I'm not sure you can. That function might return the pixel buffer unaligned, and there is nothing you can do to make reading whole words from it safe.
You will have to access the colors component by component (as uint8_t) and construct each uint32_t by shifting and OR-ing.
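Alternatively (an addition, not from the answer above), memcpy is defined for any alignment, and modern compilers usually lower a fixed-size memcpy to a plain store on targets that allow unaligned access; put_u32 here is a hypothetical helper name:
#include <stdint.h>
#include <string.h>

/* Store a 32-bit value through a possibly unaligned byte pointer. */
static void put_u32(uint8_t *p, uint32_t v)
{
    memcpy(p, &v, sizeof v);
}
The inner loop could then call put_u32(rbuf + offset, cola) and advance the offset by 4, avoiding the uint32_t * cast and its warning.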
I need to write a function to convert big endian to little endian in C. I can not use any library function.
Assuming what you need is a simple byte swap, try something like
Unsigned 16 bit conversion:
swapped = (num>>8) | (num<<8);
Unsigned 32-bit conversion:
swapped = ((num>>24)&0xff) | // move byte 3 to byte 0
((num<<8)&0xff0000) | // move byte 1 to byte 2
((num>>8)&0xff00) | // move byte 2 to byte 1
((num<<24)&0xff000000); // byte 0 to byte 3
This swaps the byte order from positions 1234 to 4321. If your input was 0xdeadbeef, the output of the 32-bit swap would be 0xefbeadde.
The code above should be cleaned up with macros, or at least constants instead of magic numbers, but hopefully it helps as is.
EDIT: As another answer pointed out, there are platform-, OS-, and instruction-set-specific alternatives which can be MUCH faster than the above. In the Linux kernel there are macros (cpu_to_be32, for example) which handle endianness pretty nicely. But these alternatives are specific to their environments. In practice, endianness is best dealt with using a blend of the available approaches.
By including:
#include <byteswap.h>
you get optimized, machine-dependent byte-swapping functions.
Then, you can easily use the following functions:
__bswap_32 (uint32_t input)
or
__bswap_16 (uint16_t input)
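A usage sketch, assuming glibc (byteswap.h is a glibc extension, not standard C):
#include <stdio.h>
#include <stdint.h>
#include <byteswap.h> /* glibc-specific header */

int main(void)
{
    uint32_t x = 0xdeadbeef;
    printf("%08x\n", __bswap_32(x)); /* prints efbeadde */
    return 0;
}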
#include <stdint.h>
//! Byte swap unsigned short
uint16_t swap_uint16( uint16_t val )
{
return (val << 8) | (val >> 8 );
}
//! Byte swap short
int16_t swap_int16( int16_t val )
{
return (val << 8) | ((val >> 8) & 0xFF);
}
//! Byte swap unsigned int
uint32_t swap_uint32( uint32_t val )
{
val = ((val << 8) & 0xFF00FF00 ) | ((val >> 8) & 0xFF00FF );
return (val << 16) | (val >> 16);
}
//! Byte swap int
int32_t swap_int32( int32_t val )
{
val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF );
return (val << 16) | ((val >> 16) & 0xFFFF);
}
Update: added 64-bit byte swapping.
int64_t swap_int64( int64_t val )
{
val = ((val << 8) & 0xFF00FF00FF00FF00ULL ) | ((val >> 8) & 0x00FF00FF00FF00FFULL );
val = ((val << 16) & 0xFFFF0000FFFF0000ULL ) | ((val >> 16) & 0x0000FFFF0000FFFFULL );
return (val << 32) | ((val >> 32) & 0xFFFFFFFFULL);
}
uint64_t swap_uint64( uint64_t val )
{
val = ((val << 8) & 0xFF00FF00FF00FF00ULL ) | ((val >> 8) & 0x00FF00FF00FF00FFULL );
val = ((val << 16) & 0xFFFF0000FFFF0000ULL ) | ((val >> 16) & 0x0000FFFF0000FFFFULL );
return (val << 32) | (val >> 32);
}
Here's a fairly generic version; I haven't compiled it, so there are probably typos, but you should get the idea.
void SwapBytes(void *pv, size_t n)
{
assert(n > 0);
char *p = pv;
size_t lo, hi;
for(lo=0, hi=n-1; hi>lo; lo++, hi--)
{
char tmp=p[lo];
p[lo] = p[hi];
p[hi] = tmp;
}
}
#define SWAP(x) SwapBytes(&x, sizeof(x));
NB: This is not optimised for speed or space. It is intended to be clear (easy to debug) and portable.
Update 2018-04-04
Added the assert() to trap the invalid case of n == 0, as spotted by commenter @chux.
If you need macros (e.g. embedded system):
#define SWAP_UINT16(x) (((x) >> 8) | ((x) << 8))
#define SWAP_UINT32(x) (((x) >> 24) | (((x) & 0x00FF0000) >> 8) | (((x) & 0x0000FF00) << 8) | ((x) << 24))
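A quick usage sketch (an addition; the macro is repeated so the sketch is self-contained). Note that, being macros, these evaluate their argument more than once, so don't pass expressions with side effects:
#include <stdio.h>
#include <stdint.h>

#define SWAP_UINT32(x) (((x) >> 24) | (((x) & 0x00FF0000) >> 8) | (((x) & 0x0000FF00) << 8) | ((x) << 24))

int main(void)
{
    uint32_t v = 0xdeadbeef;
    printf("%08x\n", SWAP_UINT32(v)); /* prints efbeadde */
    /* caution: SWAP_UINT32(v++) would evaluate v++ four times */
    return 0;
}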
Edit: These are library functions. Following them is the manual way to do it.
I am absolutely stunned by the number of people unaware of _byteswap_ushort, _byteswap_ulong, and _byteswap_uint64. Sure they are Visual C++ specific, but they compile down to some delicious code on x86/IA-64 architectures. :)
Here's an explicit usage of the bswap instruction, pulled from this page. Note that the intrinsic form above will always be faster than this; I only added it to give an answer without a library routine.
uint32_t cq_ntohl(uint32_t a) { /* MSVC x86 inline assembly */
__asm{
mov eax, a;
bswap eax;
}
}
As a joke:
#include <stdio.h>
int main (int argc, char *argv[])
{
size_t sizeofInt = sizeof (int);
int i;
union
{
int x;
char c[sizeof (int)];
} original, swapped;
original.x = 0x12345678;
for (i = 0; i < sizeofInt; i++)
swapped.c[sizeofInt - i - 1] = original.c[i];
fprintf (stderr, "%x\n", swapped.x);
return 0;
}
Here's a way to use the SSSE3 instruction pshufb via its Intel intrinsic, assuming you have a multiple of 4 ints:
unsigned int *bswap(unsigned int *destination, unsigned int *source, int length) {
int i;
__m128i mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3);
for (i = 0; i < length; i += 4) {
_mm_storeu_si128((__m128i *)&destination[i],
_mm_shuffle_epi8(_mm_loadu_si128((__m128i *)&source[i]), mask));
}
return destination;
}
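A quick usage sketch (an addition; compile together with the function above, with SSSE3 enabled, e.g. gcc -mssse3, and add #include <tmmintrin.h> for the intrinsics):
#include <stdio.h>

unsigned int *bswap(unsigned int *destination, unsigned int *source, int length); /* defined above */

int main(void)
{
    unsigned int src[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
    unsigned int dst[4];
    bswap(dst, src, 4); /* length is in ints and must be a multiple of 4 */
    printf("%08x\n", dst[0]); /* prints 44332211 */
    return 0;
}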
Will this work / be faster?
uint32_t swapped, result;
((byte*)&swapped)[0] = ((byte*)&result)[3];
((byte*)&swapped)[1] = ((byte*)&result)[2];
((byte*)&swapped)[2] = ((byte*)&result)[1];
((byte*)&swapped)[3] = ((byte*)&result)[0];
This code snippet converts a 32-bit little-endian number to big-endian.
#include <stdio.h>

int main(void)
{
    unsigned int i = 0xfafbfcfd;
    unsigned int j;
    j = ((i & 0xff000000) >> 24) | ((i & 0xff0000) >> 8) | ((i & 0xff00) << 8) | ((i & 0xff) << 24);
    printf("unsigned int j = %x\n", j);
    return 0;
}
Here's a function I have been using - tested and works on any basic data type:
// SwapBytes.h
//
// Function to perform in-place endian conversion of basic types
//
// Usage:
//
// double d;
// SwapBytes(&d, sizeof(d));
//
inline void SwapBytes(void *source, int size)
{
typedef unsigned char TwoBytes[2];
typedef unsigned char FourBytes[4];
typedef unsigned char EightBytes[8];
unsigned char temp;
if(size == 2)
{
TwoBytes *src = (TwoBytes *)source;
temp = (*src)[0];
(*src)[0] = (*src)[1];
(*src)[1] = temp;
return;
}
if(size == 4)
{
FourBytes *src = (FourBytes *)source;
temp = (*src)[0];
(*src)[0] = (*src)[3];
(*src)[3] = temp;
temp = (*src)[1];
(*src)[1] = (*src)[2];
(*src)[2] = temp;
return;
}
if(size == 8)
{
EightBytes *src = (EightBytes *)source;
temp = (*src)[0];
(*src)[0] = (*src)[7];
(*src)[7] = temp;
temp = (*src)[1];
(*src)[1] = (*src)[6];
(*src)[6] = temp;
temp = (*src)[2];
(*src)[2] = (*src)[5];
(*src)[5] = temp;
temp = (*src)[3];
(*src)[3] = (*src)[4];
(*src)[4] = temp;
return;
}
}
EDIT: This function only swaps the endianness of aligned 16-bit words, which is often needed for UTF-16/UCS-2 encodings.
EDIT END.
If you want to change the endianness of a memory block you can use my blazingly fast approach.
Your memory array should have a size that is a multiple of 8.
#include <stddef.h>
#include <limits.h>
#include <stdint.h>
void ChangeMemEndianness(uint64_t *mem, size_t size)
{
uint64_t m1 = 0xFF00FF00FF00FF00ULL, m2 = m1 >> CHAR_BIT;
size = (size + (sizeof (uint64_t) - 1)) / sizeof (uint64_t);
for(; size; size--, mem++)
*mem = ((*mem & m1) >> CHAR_BIT) | ((*mem & m2) << CHAR_BIT);
}
This kind of function is useful for changing the endianness of Unicode UCS-2/UTF-16 files.
If you are running on an x86 or x86_64 processor, little endian is native, so big-endian values have to be byte-swapped:
for 16 bit values
unsigned short wBigE = value;
unsigned short wLittleE = ((wBigE & 0xFF) << 8) | (wBigE >> 8);
for 32 bit values
unsigned int iBigE = value;
unsigned int iLittleE = ((iBigE & 0xFF) << 24)
| ((iBigE & 0xFF00) << 8)
| ((iBigE >> 8) & 0xFF00)
| (iBigE >> 24);
This isn't the most efficient solution unless the compiler recognises that this is byte level manipulation and generates byte swapping code. But it doesn't depend on any memory layout tricks and can be turned into a macro pretty easily.
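As an aside (an addition, not part of the answer above): GCC and Clang provide builtins that compile directly to the native swap instruction, which is exactly the recognition the answer hopes for. A sketch assuming one of those compilers (swap32 is a hypothetical wrapper name):
#include <stdint.h>

/* GCC/Clang builtin; MSVC offers _byteswap_ulong instead (see the earlier answer) */
uint32_t swap32(uint32_t value)
{
    return __builtin_bswap32(value);
}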