How to retrieve a value from bytes stored in a byte array? - C

I am trying to store the value of a pointer as 8 bytes in a byte array.
int main() {
    unsigned long a = 0;
    char buf[8];
    int i = 0;
    int *p = &i;
    a = (unsigned long)p;
    while (i < 8)
    {
        buf[i] = (a >> (8 * i)) & 0xFF;
        i++;
    }
    a = 0;
    i = 0;
    while (i < 8)
    {
        a = ?
        i++;
    }
    p = (int *)a;
}
The first loop stores the successive bytes of p (cast to unsigned long in a), but I don't know how to retrieve the value in the second loop. Does somebody have a clue?

This is the inverse code to yours:
a = 0;
while (i < 8)
{
    a |= ((unsigned long)buf[i] & 0xff) << (8 * i);
    i++;
}
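If a particular byte order is not required, a memcpy round trip does the same job with less code; a minimal sketch:
#include <stdio.h>
#include <string.h>
int main(void) {
    int i = 42;
    int *p = &i;
    unsigned char buf[sizeof p];
    memcpy(buf, &p, sizeof p);   /* store the pointer's bytes in the array */
    int *q = NULL;
    memcpy(&q, buf, sizeof q);   /* copy them back into a pointer object */
    printf("%d\n", *q);          /* prints 42 */
    return 0;
}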

Related

Radix Sort Float

I am trying to sort floats with radix. My current algorithm works with unsigned ints. For example, if I enter the values 12, 100, 1, my sorted values are 1, 12, and 100. However, when I use a function to convert floats to ints and back to floats around the radix sort call, my values remain unsorted; they print in the order they were entered by the user.
I am unsure how to modify my current function to be able to sort floats with radix.
void rs(unsigned int *a, int c) {
    int i;
    int m = a[0];
    int bt = 0;
    unsigned int *b = malloc(0 * sizeof(int));
    for (i = 0; i < c; i++) {
        if (a[i] > m)
            m = a[i];
    }
    while ((m >> bt) > 0) {
        int buck[2] = { 0 };
        for (i = 0; i < c; i++) {
            buck[(a[i] >> bt) & 1]++;
        }
        for (i = 1; i < 2; i++) {
            buck[i] += buck[i-1];
        }
        for (i = c-1; i >= 0; i--) {
            b[--buck[(a[i] >> bt) & 1]] = a[i];
        }
        for (i = 0; i < c; i++) {
            a[i] = b[i];
        }
        bt++;
    }
    free(b);
}
The function I am using to transform the floats to ints and back to floats is (from "Radix Sort for Floats"):
void rfloat(float* arr, size_t size) {
    assert(sizeof(unsigned) == sizeof(float) && sizeof(float) == 4);
    unsigned* d = malloc(size * sizeof(unsigned));
    for (size_t i = 0; i < size; i++) {
        // Interpret float as 32-bit unsigned.
        d[i] = *(unsigned*) &(arr[i]);
        // Flip all except top if top bit is set.
        d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
        // Flip top bit.
        d[i] ^= (1u << 31);
    }
    rs(d, size);
    // Inverse transform.
    for (size_t i = 0; i < size; i++) {
        d[i] ^= (1u << 31);
        d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
        arr[i] = *(float*) &(d[i]);
    }
    free(d);
}
There are multiple issues.
You use int all over the place where you should be using unsigned (for values) or size_t (for sizes/indices).
You allocate 0 bytes.
(m >> bt) > 0 doesn't work as a stop condition: shifting by a count equal to or greater than the width of the type is undefined behavior.
After changing the types to unsigned/size_t, loop conditions like i >= 0 no longer work, because an unsigned index never goes below zero.
I took the liberty of fixing the above and choosing some better variable names:
#include <limits.h>
#include <stdlib.h>
void rs(unsigned int *a, size_t c) {
    size_t i;
    unsigned bit = 0;
    unsigned *b = malloc(c * sizeof(unsigned));
    unsigned m = a[0]; // Max element.
    for (i = 0; i < c; i++) {
        if (a[i] > m) m = a[i];
    }
    while (bit < CHAR_BIT * sizeof(m) && (m >> bit)) {
        size_t bucket_len[2] = { 0, 0 };
        for (i = 0; i < c; i++) bucket_len[(a[i] >> bit) & 1]++;
        size_t bucket_end[2] = { bucket_len[0], bucket_len[0] + bucket_len[1] };
        for (i = c; i-- > 0; ) {
            size_t j = --bucket_end[(a[i] >> bit) & 1];
            b[j] = a[i];
        }
        for (i = 0; i < c; i++) a[i] = b[i];
        bit++;
    }
    free(b);
}
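A quick way to sanity-check the fixed version, assuming rs() and rfloat() above are compiled together (with the headers they need):
#include <stdio.h>
/* rs() and rfloat() as defined above */
int main(void) {
    float v[] = { 12.0f, 100.0f, 1.0f, -3.5f, 0.25f };
    size_t n = sizeof v / sizeof v[0];
    rfloat(v, n);
    for (size_t i = 0; i < n; i++)
        printf("%g ", v[i]);   /* expected output: -3.5 0.25 1 12 100 */
    printf("\n");
    return 0;
}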

How to concatenate bit by bit in c?

I have a matrix of '1' and '0' characters with dimensions 8x8. I need to store the whole matrix in one unsigned long long variable, bit by bit. How can I do that?
For example, let's take the matrix of '1' and '0' that is 2x2:
The matrix 2x2:
1 0
0 1
The variable must contain: 1001 in bits.
The same idea applies to the 8x8 matrix and an unsigned long long variable.
This is what I've tried to do:
#include <stdio.h>
#define SIZE 8
int main()
{
    unsigned long long result = 0;
    char matrix[SIZE][SIZE]; // let's say the matrix is already filled with '1' and '0'
    int i, j;
    for (i = 0; i < SIZE; i++)
    {
        for (j = 0; j < SIZE; j++)
        {
            result = result | ((unsigned long long)(matrix[i][j] - '0'));
            result <<= 1;
        }
    }
    return 0;
}
Is this right? I implemented this nested loop in my algorithm and it didn't work properly.
Converting the text representation of an integer into its integer value can be done using strtoull().
char buf[sizeof(matrix)+1];
memcpy(buf, matrix, sizeof(matrix));
buf[sizeof(matrix)] = '\0';
result = strtoull(buf, NULL, 2);
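For context, a self-contained sketch of that idea; it assumes the matrix holds only '0' and '1' characters and relies on the rows being contiguous in memory:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(void) {
    char matrix[8][8];
    memset(matrix, '0', sizeof(matrix));
    matrix[0][0] = '1';                  /* set one bit for demonstration */
    char buf[sizeof(matrix)+1];
    memcpy(buf, matrix, sizeof(matrix));
    buf[sizeof(matrix)] = '\0';
    unsigned long long result = strtoull(buf, NULL, 2);
    printf("%llu\n", result);            /* matrix[0][0] ends up as the most significant bit */
    return 0;
}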
Try this:
#include <stdbool.h>
const int mx_size = 8;
int main() {
    unsigned long long result = 0;
    bool matrix[8][8] = { { 0 } };
    // Example fill: set the diagonal so there is something to pack.
    for (int i = 0; i < mx_size; ++i)
        matrix[i][i] = 1;
    for (int i = 0; i < mx_size; i++) {
        for (int j = 0; j < mx_size; j++) {
            result |= (unsigned long long)matrix[i][j] << (i * mx_size + j);
        }
    }
    return 0;
}
Here you have the code (a bit more general version, with parameters for the bit order and for the character that counts as zero):
#include <stdio.h>
#include <stdint.h>
uint64_t convert(char matrix[8][8], int order, char zero)
{
    uint8_t byte;
    uint64_t result = 0;
    for (size_t row = 0; row < 8; row++)
    {
        byte = 0;
        for (size_t column = 0; column < 8; column++)
        {
            byte <<= 1;
            byte |= matrix[row][column] != zero ? 1 : 0; // anything != the defined zero char is 1
        }
        if (order)
        {
            result |= (uint64_t)byte << (8 * row);
        }
        else
        {
            result |= (uint64_t)byte << (56 - 8 * row);
        }
    }
    return result;
}
int main(void) {
    char matrix[8][8] =
    {
        {'1','0','1','0','1','0','1','0'},
        {'0','1','0','1','0','1','0','1'},
        {'1','1','1','0','0','0','1','1'},
        {'0','0','0','1','1','1','0','0'},
        {'1','1','1','1','1','0','0','0'},
        {'0','0','0','0','1','1','1','1'},
        {'1','1','0','0','1','1','0','0'},
        {'0','0','1','1','0','0','1','1'},
    };
    unsigned long long result = convert(matrix, 0, '0');
    for (size_t index = 0; index < 64; index++)
        printf("%1d", !!(result & (1ULL << index)));
    printf("\n");
    result = convert(matrix, 1, '0');
    for (size_t index = 0; index < 64; index++)
        printf("%1d", !!(result & (1ULL << index)));
    printf("\n");
    return 0;
}

How can I encode four 16 bit uints into a 64 bit uint, and decode them again?

I wrote this function with the help of this page on bit twiddling:
uint16_t *decode(uint64_t instr) {
    // decode instr (this is new to me lol)
    uint16_t icode = (instr >> 48) & ((1 << 16) - 1);
    uint16_t p1 = (instr >> 32) & ((1 << 16) - 1);
    uint16_t p2 = (instr >> 16) & ((1 << 16) - 1);
    uint16_t p3 = (instr >> 00) & ((1 << 16) - 1);
    return (uint16_t[]){icode, p1, p2, p3};
}
I have this to test it:
uint16_t *arr = decode(number);
for (int i = 0; i < 4; i++) {
    printf("%d\n", arr[i]);
}
However, this prints 0 four times, no matter what number is. I also haven't solved the first part of the question: how to encode the four uint16_t's in the first place.
how to encode the four uint16_t's in the first place
This isn't hard. All you have to do is load each uint16_t into a uint64_t one by one, and then return that uint64_t:
uint64_t encode(uint16_t uints[]) {
    uint64_t master = 0;
    for (uint8_t index = 0; index <= 3; ++index) {
        master <<= 16;          // Shift master left by 16 bits to create space for the next uint16
        master |= uints[index]; // Load uints[index] into the lower 16 bits of master
    } // Do this four times
    return master;
}
To load the uint16_ts in reverse order, use a signed counter: replace uint8_t index = 0; index <= 3; ++index with int index = 3; index >= 0; --index (with an unsigned type, the condition index >= 0 would never become false and the loop would not terminate).
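For example, assuming the encode() above (hexadecimal makes the packing order easy to see):
#include <stdio.h>
#include <stdint.h>
/* encode() as defined above */
int main(void) {
    uint16_t parts[4] = { 0xAAAA, 0xBBBB, 0xCCCC, 0xDDDD };
    uint64_t packed = encode(parts);
    printf("%#llx\n", (unsigned long long)packed);   /* prints 0xaaaabbbbccccdddd */
    return 0;
}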
Your best bet is actually to use memcpy. Most modern compilers will optimize this into the necessary bit shifts and such for you.
#include <stdint.h>
#include <string.h>
uint64_t pack(const uint16_t arr[static 4]) {
    uint64_t res;
    memcpy(&res, arr, 8);
    return res;
}
void unpack(uint64_t v, uint16_t arr[static 4]) {
    memcpy(arr, &v, 8);
}
Note that the result is endian-dependent, appropriate for packing and unpacking on the same machine. Note too that I'm using the static array specifier to check that the caller passes at least 4 elements (when such checking is possible); if that gives your compiler grief, just remove the static specifier.
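A round trip with these helpers might look like the sketch below; the intermediate packed value depends on the machine's byte order, but unpacking on the same machine recovers the original values:
#include <stdio.h>
#include <stdint.h>
/* pack() and unpack() as defined above */
int main(void) {
    uint16_t in[4] = { 1, 2, 3, 4 };
    uint16_t out[4];
    unpack(pack(in), out);
    for (int i = 0; i < 4; i++)
        printf("%u ", (unsigned)out[i]);   /* prints 1 2 3 4 */
    printf("\n");
    return 0;
}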
First, you can't return an array from a function the way you currently have it (which is why you're getting 0s); you'll need to pass the output in via a pointer or give it static storage.
However, since you're dealing with two known bit widths, you can mask and shift each word out:
out[0] = val & 0x000000000000FFFF; // 1st word
out[1] = (val & 0x00000000FFFF0000) >> 16; // 2nd word
out[2] = (val & 0x0000FFFF00000000) >> 32; // 3rd word
out[3] = (val & 0xFFFF000000000000) >> 48; // 4th word
You could put this in a function or macro:
#define MACRO_DECODE(val, arr) arr[0] = val & 0x000000000000FFFF; \
                               arr[1] = (val & 0x00000000FFFF0000) >> 16; \
                               arr[2] = (val & 0x0000FFFF00000000) >> 32; \
                               arr[3] = (val & 0xFFFF000000000000) >> 48;
void decode(uint64_t val, uint16_t *out)
{
    out[0] = val & 0x000000000000FFFF;
    out[1] = (val & 0x00000000FFFF0000) >> 16;
    out[2] = (val & 0x0000FFFF00000000) >> 32;
    out[3] = (val & 0xFFFF000000000000) >> 48;
}
int main(int argc, char** argv)
{
    int i;
    uint16_t arr[] = { 0, 0, 0, 0 };
    for (i = 0; i < 4; ++i) {
        printf("%#06x = %d\n", arr[i], arr[i]);
    }
    // as a function
    decode(0xAAAABBBBCCCCDDDD, arr);
    for (i = 0; i < 4; ++i) {
        printf("%#06x = %d\n", arr[i], arr[i]);
    }
    // as a macro
    MACRO_DECODE(0xDDDDCCCCBBBBAAAA, arr);
    for (i = 0; i < 4; ++i) {
        printf("%#06x = %d\n", arr[i], arr[i]);
    }
    return 0;
}
Additionally, you could use memcpy:
int main(int argc, char** argv)
{
    int i;
    uint16_t arr[] = { 0, 0, 0, 0 };
    uint64_t src = 0xAAAABBBBCCCCDDDD;
    for (i = 0; i < 4; ++i) {
        printf("%#06x = %d\n", arr[i], arr[i]);
    }
    // memcpy
    memcpy(arr, &src, sizeof(arr));
    for (i = 0; i < 4; ++i) {
        printf("%#06x = %d\n", arr[i], arr[i]);
    }
    return 0;
}
Hope that can help.

Error while splitting unsigned int into unsigned char[]

I am trying to split an unsigned int into its decimal digits: for example, the number 234 becomes {0,0,0,2,3,4} in the unsigned char array. I am seeing strange effects, and I am not sure I am doing it the right way. Is there a better one?
Here is the code I am using now:
void display_decl(unsigned int j)
{
    unsigned char lst[6];
    lst[5] = j & 0x0f;
    lst[4] = j >> 4 & 0x0f;
    lst[3] = j >> 8 & 0x0f;
    lst[2] = j >> 12 & 0x0f;
    lst[1] = j >> 16 & 0x0f;
    lst[0] = j >> 20 & 0x0f;
    display_digits(lst);
}
To split a number into characters, the code needs to use the intended base and null-terminate the buffer so it can be treated as a string.
#include <limits.h>
void display_decl(unsigned int j) {
    char lst[sizeof j * CHAR_BIT + 1];
    unsigned base = 10; // or 16 or any base 2 to 16
    char *p = &lst[sizeof lst] - 1;
    *p = '\0';
    do {
        p--;
        *p = "0123456789ABCDEF"[j % base];
        j /= base;
    } while (j > 0);
    display_string(p);
}
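For example, with a hypothetical display_string() that simply prints its argument, display_decl(234) hands it the text "234":
#include <stdio.h>
#include <limits.h>
void display_string(const char *s) {   /* hypothetical stand-in */
    printf("%s\n", s);
}
/* display_decl() as defined above */
int main(void) {
    display_decl(234);   /* prints 234 */
    return 0;
}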

C: Building a byte

I have an array with 16 elements. I would like to evaluate each element to a boolean 0 or 1 and then store the result in 2 bytes so I can write it to a binary file. How do I do this?
Something like this you mean?
unsigned short binary = 0, i;
for ( i = 0; i < 16; ++i )
    if ( array[i] )
        binary |= 1 << i;
// the i-th bit of binary is 1 if array[i] is true and 0 otherwise.
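Since the goal is to write the two bytes to a binary file, one possible follow-up sketch (the file name and fill values are only for illustration; fwrite here stores the value in the machine's native byte order):
#include <stdio.h>
int main(void) {
    int array[16] = { 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1 };
    unsigned short binary = 0, i;
    for (i = 0; i < 16; ++i)
        if (array[i])
            binary |= 1u << i;
    FILE *f = fopen("bits.bin", "wb");
    if (f) {
        fwrite(&binary, sizeof binary, 1, f);   /* write the two bytes */
        fclose(f);
    }
    return 0;
}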
You have to use bitwise operators.
Here's an example:
int firstBit = 0x1;
int secondBit = 0x2;
int thirdBit = 0x4;
int fourthBit = 0x8;
int x = firstBit | fourthBit;     /* both the 1st and 4th bit are set */
int isFirstBitSet = x & firstBit; /* check if at least the first bit is set */
int values[16];
int i;
unsigned short word = 0;
unsigned short bit = 1;
for (i = 0; i < 16; i++)
{
    if (values[i])
    {
        word |= bit;
    }
    bit <<= 1;
}
This solution avoids the use of the if inside the loop:
unsigned short binary = 0, i;
for ( i = 0; i < 16; ++i )
    binary |= (array[i] != 0) << i;
Declare a result array of two bytes, initialized to zero, then loop through the source array:
for (int i = 0; i < 16; i++) {
    // calculate index in result array
    int index = i >> 3;
    // shift value in result
    result[index] <<= 1;
    // check array value
    if (theArray[i]) {
        // true, so set lowest bit in result byte
        result[index]++;
    }
}
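Put together, a self-contained version of this approach (example values only) might look like this:
#include <stdio.h>
int main(void) {
    int theArray[16] = { 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1 };
    unsigned char result[2] = { 0, 0 };   /* must start out zeroed */
    for (int i = 0; i < 16; i++) {
        int index = i >> 3;               /* byte 0 for i = 0..7, byte 1 for i = 8..15 */
        result[index] <<= 1;              /* make room for the next bit */
        if (theArray[i]) {
            result[index]++;              /* set the lowest bit */
        }
    }
    printf("%02X %02X\n", (unsigned)result[0], (unsigned)result[1]);   /* prints B2 F1 */
    return 0;
}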
Something like this.
int values[16];
int bits = 0;
for (int ii = 0; ii < 16; ++ii)
{
    bits |= (!!values[ii]) << ii;
}
unsigned short output = (unsigned short)bits;
The expression (!!values[ii]) forces the value to be 0 or 1. If you know for sure that the values array already contains either a 0 or a 1 and nothing else, you can leave off the !!.
You could also do this if you don't like the !! syntax:
int values[16];
int bits = 0;
for (int ii = 0; ii < 16; ++ii)
{
    bits |= (values[ii] != 0) << ii;
}
unsigned short output = (unsigned short)bits;
