I'm developing a Game Boy game in GBDK, but I've got a problem with right-shifting 8-bit unsigned variables. The code looks like this:
#include <gb/gb.h>
#include <stdio.h>
#define GAME_OBJ_MAX_WIDTH 10
#define GAME_OBJ_MAX_HEIGHT 8
struct game_obj
{
UBYTE matrix[GAME_OBJ_MAX_WIDTH];
UBYTE width, height;
};
void draw_game_obj(struct game_obj *object)
{
for (unsigned char i = 0; i < object->width && i < GAME_OBJ_MAX_WIDTH; i++)
{
printf("BASE->%d\n", object->matrix[i]);
for (unsigned char j = 0; j < object->height && j < GAME_OBJ_MAX_HEIGHT; j++)
printf("Shift by %d->%u\n", j, object->matrix[i] >> j);
}
}
void main() {
struct game_obj racer;
racer.height = 4;
racer.width = 3;
racer.matrix[0] = 10;
racer.matrix[1] = 7;
racer.matrix[2] = 10;
draw_game_obj(&racer);
}
The output is:
BASE->10
Shift by 0->235
Shift by 1->117
Shift by 2->58
Shift by 3->29
BASE->7
Shift by 0->235
Shift by 1->117
Shift by 2->58
Shift by 3->29
BASE->10
Shift by 0->235
Shift by 1->117
Shift by 2->58
Shift by 3->29
Basically, any unsigned value right-shifted (even by 0) changes to 235, 117, 58 and so on... I'm trying to understand why that is.
Solution
The issue was fixed by assigning object->matrix[i] to a separate variable:
void draw_game_obj(struct game_obj *object)
{
for (unsigned char i = 0; i < object->width && i < GAME_OBJ_MAX_WIDTH; i++)
{
printf("BASE->%d\n", object->matrix[i]);
UBYTE u = object->matrix[i];
for (unsigned char j = 0; j < object->height && j < GAME_OBJ_MAX_HEIGHT; j++)
printf("Shift by %d->%u\n", j, u >> j);
}
}
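For completeness, another thing that may be worth trying (untested, and only an assumption about where the 8-bit promotion goes wrong) is widening the shift result with an explicit cast before it reaches printf's variadic arguments:
/* Hypothetical variant: cast the 8-bit shift result to a full unsigned int
   before it is passed as a variadic argument. Whether this avoids the bad
   output above is an assumption, not a verified fix for this GBDK behaviour. */
printf("Shift by %d->%u\n", j, (unsigned int)(object->matrix[i] >> j));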
Related
I am trying to sort floats with radix sort. My current algorithm works with unsigned integers: for example, if I enter the values 12, 100, 1, the sorted output is 1, 12, 100. However, when I use a function to convert the floats to ints and back to floats around the call to the radix sort, my values remain unsorted; they print in the order they were entered by the user.
I am unsure how to modify my current function to be able to sort floats with radix sort.
void rs(unsigned int *a, int c) {
int i;
int m = a[0];
int bt = 0;
unsigned int *b = malloc(0 * sizeof(int));
for (i = 0; i < c; i++) {
if (a[i] > m)
m = a[i];
}
while((m>>bt) > 0){
int buck[2] = { 0 };
for (i = 0; i < c; i++) {
buck[(a[i]>>bt)&1]++;
}
for (i = 1; i < 2; i++) {
buck[i] += buck[i-1];
}
for (i = c-1; i >= 0; i--) {
b[--buck[(a[i]>>bt)&1]] = a[i];
}
for (i = 0; i < c; i++) {
a[i] = b[i];
}
bt++;
}
free(b);
}
The function I am using to transform floats to ints and back to floats is (from Radix Sort for Floats):
void rfloat(float* arr, size_t size) {
assert(sizeof(unsigned) == sizeof(float) && sizeof(float) == 4);
unsigned* d = malloc(size * sizeof(unsigned));
for (size_t i = 0; i < size; i++) {
// Interpret float as 32-bit unsigned.
d[i] = *(unsigned*) &(arr[i]);
// Flip all except top if top bit is set.
d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
// Flip top bit.
d[i] ^= (1u << 31);
}
rs(d, size);
// Inverse transform.
for (size_t i = 0; i < size; i++) {
d[i] ^= (1u << 31);
d[i] ^= (((unsigned) (((int) d[i]) >> 31)) >> 1);
arr[i] = *(float*) &(d[i]);
}
free(d);
}
There are multiple issues.
You use int all over the place where you should be using unsigned (for values) or size_t (for sizes/indices).
You allocate 0 bytes.
(m >> bt) > 0 doesn't work as a stop condition; shifting by a count equal to or greater than the type's width is undefined behavior.
After changing the data types to unsigned, the loop boundaries don't work anymore.
I took the liberty of fixing the above and choosing some better variable names:
#include <limits.h>
void rs(unsigned int *a, size_t c) {
size_t i;
unsigned bit = 0;
unsigned *b = malloc(c * sizeof(unsigned));
unsigned m = a[0]; // Max element.
for (i = 0; i < c; i++) {
if (a[i] > m) m = a[i];
}
while (bit < CHAR_BIT*sizeof(m) && (m >> bit)) {
size_t bucket_len[2] = { 0, 0 };
for (i = 0; i < c; i++) bucket_len[(a[i] >> bit) & 1]++;
size_t bucket_end[2] = {bucket_len[0], bucket_len[0] + bucket_len[1]};
for (i = c; i-- > 0; ) {
size_t j = --bucket_end[(a[i] >> bit) & 1];
b[j] = a[i];
}
for (i = 0; i < c; i++) a[i] = b[i];
bit++;
}
free(b);
}
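With the fixed rs() above and the rfloat() wrapper from the question, a quick sanity check is a small driver like the sketch below (rs() and rfloat() are assumed to be pasted above it):
#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* rs() (the fixed version above) and rfloat() (from the question) go here. */

int main(void)
{
    float v[] = { 12.0f, 100.0f, 1.0f, -3.5f, 0.25f };
    size_t n = sizeof v / sizeof v[0];

    rfloat(v, n);

    for (size_t i = 0; i < n; i++)
        printf("%g ", v[i]);   /* expected: -3.5 0.25 1 12 100 */
    printf("\n");
    return 0;
}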
I want to construct two big numbers as byte arrays in C and add them.
The following is my code:
void add(unsigned char* a, unsigned char* b, unsigned int len)
{
int i;
unsigned short T;
unsigned char carry = 0;
for (i = len - 1; i >= 0; --i)
{
T = (unsigned short)(a[i]) + (unsigned short)(b[i]) + (unsigned short)carry;
//T = a[i] + b[i] + carry;
if (T > 0xFF)
carry = 1;
else
carry = 0;
a[i] = (unsigned char)T;
}
}
The maximum value of each element in arrays a and b is 255.
EDIT1: The highest carry is discarded. The result is saved in array a.
EDIT2: Replaced "Byte" with "carry".
The original code is:
Integer B1(B, SM3_BLOCK_SIZE);
++B1;
for (i = 0; i < ILen; i += v)
(Integer(I + i, v) + B1).Encode(I + i, v);
I wrote two new functions. One is the add() above; the other is as follows:
void add_one(unsigned char *arr, unsigned int len)
{
int i;
for (i = len-1; i >= 0; --i)
{
arr[len] += 1;
if (arr[len] != 0)
return;
}
}
If my code is right, the original code translates to the following:
add_one(B, SM3_BLOCK_SIZE);
for (i = 0; i < ILen; i += v)
add(I + i, B, SM3_BLOCK_SIZE);
There is (at least) one bug. Look at this code:
void add_one(unsigned char *arr, unsigned int len)
{
int i;
for (i = len-1; i >= 0; --i)
{
arr[len] += 1; // Indexing using len is wrong
if (arr[len] != 0) // Indexing using len is wrong
return;
}
}
You probably want to use i as the index.
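A minimal corrected sketch of add_one (indexing with i and walking from the least-significant byte, assuming the same big-endian layout as the rest of the question):
void add_one(unsigned char *arr, unsigned int len)
{
    unsigned int i;
    for (i = len; i-- > 0; )   /* least-significant byte is at the end */
    {
        arr[i] += 1;           /* index with i, not len */
        if (arr[i] != 0)       /* no carry out of this byte, so we are done */
            return;
    }
}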
I assume you know that you are implementing the add function for a big-endian positive integer.
Avoid for (i = len-1; i >= 0; --i): when i is unsigned, i >= 0 is always true, and with len equal to 0 the subtraction wraps around, so the loop runs out of bounds. Instead, use for (i = len; i-- > 0;).
If you need a little-endian integer, then use for (int i = 0; i < len; ++i).
char add(unsigned char* a, unsigned char* b, unsigned int len)
{
unsigned short carry = 0;
//for (int i = 0; i < len; ++i) // for little-endian
for (int i = len; i-- > 0;) // for big-endian
{
carry += a[i] + b[i];
a[i] = carry & 0xFF;
carry >>= 8;
}
return carry;
}
Tests
unsigned char a[5] = {255,2,3,4,5};
unsigned char b[5] = {255,256-2,256-3,4,5};
char overflow = add(a,b,5);
printf("%d %d %d %d %d / %d",a[0],a[1],a[2],a[3],a[4] , overflow);
Output
255 1 0 8 10 / 1
I have a matrix of '1' and '0' characters with dimensions 8x8. I need to store the whole matrix in one unsigned long long variable, bit by bit. How can I do that?
For example, let's take the matrix of '1' and '0' that is 2x2:
The matrix 2x2:
1 0
0 1
The variable must contain: 1001 in bits.
The same example applies for the 8x8 matrix and an unsigned long long variable.
This is what I've tried to do:
#include <stdio.h>

#define SIZE 8

int main()
{
    unsigned long long result = 0;
    char matrix[SIZE][SIZE]; // let's say the matrix is already filled with '1' and '0'
    int i, j;
    for (i = 0; i < SIZE; i++)
    {
        for (j = 0; j < SIZE; j++)
        {
            result = result | ((unsigned long long)(matrix[i][j] - '0'));
            result <<= 1;
        }
    }
    return 0;
}
Is it right? I implemented this nested loop in my algorithm, and it didn't work properly.
Converting the text representation of an integer into its integer value can be done using strtoull().
char buf[sizeof(matrix) + 1];        // 64 digit characters plus a terminating '\0'
memcpy(buf, matrix, sizeof(matrix)); // the rows of the 8x8 char matrix are contiguous
buf[sizeof(matrix)] = '\0';
result = strtoull(buf, NULL, 2);     // parse the 64 digits as a base-2 number (<stdlib.h>, <string.h>)
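A self-contained sketch of that approach (assuming the matrix holds the characters '0' and '1' row by row, so the very first character becomes the most significant bit):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char matrix[8][8];
    memset(matrix, '0', sizeof(matrix));
    matrix[0][0] = '1';                      /* ends up as bit 63 */
    matrix[7][7] = '1';                      /* ends up as bit 0  */

    char buf[sizeof(matrix) + 1];
    memcpy(buf, matrix, sizeof(matrix));     /* the rows are contiguous in memory */
    buf[sizeof(matrix)] = '\0';
    unsigned long long result = strtoull(buf, NULL, 2);

    printf("%llu\n", result);                /* prints 9223372036854775809 */
    return 0;
}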
Try this:
#include <stdbool.h>

const int mx_size = 8;

int main() {
    unsigned long long result = 0;
    bool matrix[8][8] = { 0 }; // example data: the diagonal is set to 1, everything else stays 0
    for (int i = 0; i < mx_size; ++i)
        matrix[i][i] = 1;
    for (int i = 0; i < mx_size; i++) {
        for (int j = 0; j < mx_size; j++) {
            // bit (i*8 + j) of result mirrors matrix[i][j]
            result |= (unsigned long long)matrix[i][j] << (i * mx_size + j);
        }
    }
    return 0;
}
Here you have the code (a bit more general, with a selectable bit order and a configurable 'zero' character):
#include <stdio.h>
#include <stdint.h>
uint64_t convert(char matrix[8][8], int order, char zero)
{
uint8_t byte;
uint64_t result = 0;
for(size_t row = 0; row < 8; row++)
{
byte = 0;
for(size_t column = 0; column < 8; column++)
{
byte <<= 1;
byte |= matrix[row][column] != zero ? 1 : 0; //anything != defined zero char is 1
}
if (order)
{
result |= (uint64_t)byte << (8 * row);
}
else
{
result |= (uint64_t)byte << (56 - 8 * row);
}
}
return result;
}
int main(void) {
char matrix[8][8] =
{
{'1','0','1','0','1','0','1','0'},
{'0','1','0','1','0','1','0','1'},
{'1','1','1','0','0','0','1','1'},
{'0','0','0','1','1','1','0','0'},
{'1','1','1','1','1','0','0','0'},
{'0','0','0','0','1','1','1','1'},
{'1','1','0','0','1','1','0','0'},
{'0','0','1','1','0','0','1','1'},
};
unsigned long long result = convert(matrix, 0, '0');
for(size_t index = 0; index < 64; index ++)
printf("%1d", !!(result & (1ULL << index)));
printf("\n");
result = convert(matrix,1, '0');
for(size_t index = 0; index < 64; index ++)
printf("%1d", !!(result & (1ULL << index)));
printf("\n");
return 0;
}
I've been looking for an answer for this but could not find it. I'm not trying to use ^ as a power operator.
Here's my problem.
I'm trying to reproduce an interleaver from a digital TV standard (physical layer protocol). It is a project for school.
Here's the code I have so far in C:
#include <sys/io.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <complex.h>
#include <math.h>
int main()
{
int Mmax = 32768;
int Nr = log10(Mmax)/log10(2);
printf("Nr = %i \n",Nr);
int Lfm = 42;
int i,j,k,l;
// Create array R'i
unsigned char Rli[Mmax][Nr];
for (i = 0; i < Mmax; i++)
memset(Rli[i], 0, sizeof(unsigned char)*(Nr));
for (i = 0; i < Mmax; i++)
{
for(j = 1; j < Nr; j++)
{
if(i == 0 || i == 1)
{
Rli[i][j] = 0;
}
else if(i == 2)
{
Rli[i][j] = 0;
if(j == Nr - 2)
Rli[i][j] = 1;
}
else
{
Rli[i][j+1] = Rli[i-1][j];
}
}
if(i > 2)
{
Rli[i][1] = (((Rli[i-1][14] ^ Rli[i-1][13] ) ^ Rli[i-1][12] ) ^ Rli[i-1][2]);
}
Rli[i][0] = 0;
}
// wire permutation for array R'i - creates array Ri
unsigned char Ri[Mmax][Nr];
for (i = 0; i < Mmax; i++)
memset(Ri[i], 0, sizeof(unsigned char)*(Nr));
for (i = 0; i < Mmax; i++)
{
Ri[i][1] = Rli[i][8]; //Rli[i][7];
Ri[i][2] = Rli[i][9]; //Rli[i][8];
Ri[i][3] = Rli[i][14]; //Rli[i][13];
Ri[i][4] = Rli[i][4]; //Rli[i][3];
Ri[i][5] = Rli[i][6]; //Rli[i][5];
Ri[i][6] = Rli[i][13]; //Rli[i][12];
Ri[i][7] = Rli[i][3]; //Rli[i][2];
Ri[i][8] = Rli[i][2]; //Rli[i][1];
Ri[i][9] = Rli[i][12]; //Rli[i][11];
Ri[i][10] = Rli[i][5]; //Rli[i][4];
Ri[i][11] = Rli[i][10]; //Rli[i][9];
Ri[i][12] = Rli[i][11]; //Rli[i][10];
Ri[i][13] = Rli[i][1]; //Rli[i][0];
Ri[i][14] = Rli[i][7]; //Rli[i][6];
}
// Offset generator Gk
int Gmax = floor(Lfm/2);
unsigned char Gk[Gmax][Nr];
for (i = 0; i < Gmax; i++)
memset(Gk[i], 0, sizeof(unsigned char)*Nr-1);
for (k = 0; k < Gmax; k++)
{
for(j = 0; j < Nr; j++)
{
if(k == 0)
{
Gk[k][j] = 1;
}
else
{
Gk[k][j+1] = Gk[k-1][j];
}
}
if(k > 0)
{
Gk[k][0] = ( Gk[k-1][14] ^ Gk[k-1][13] );
}
}
// interleaving sequence
int Ndata = 26303; // number of data values
unsigned char Hl[Ndata];
double H1[Ndata], H2[Ndata];
memset(Hl, 0, sizeof(unsigned char)*Ndata);
memset(H1, 0, sizeof(double)*Ndata);
memset(H2, 0, sizeof(double)*Ndata);
int p,indice;
// loop from page 107 of the physical layer protocol.
for (i = 0; i < Lfm; i++)
{
for (k = 0; k < Mmax; k++)
{
p = 0;
for (j = 0; j < Nr; j++) // sum
{
if(j >= 0 && j <= Nr - 2)
H1[p] += Ri[k][j]*pow(2,j);
else if(j >= 0 && j <= Nr - 1)
{
indice = floor(i/2);
H2[p] += Gk[indice][j]*pow(2,j);
}
}
/*****/ Hl[p] = ((i % 2)*pow(2,Nr-1) + H1[p] ) ^ H2[p]; /*****/
if (Hl[p] < Ndata)
p += 1;
}
}
The line between the /*****/ markers near the end generates an error when I try to compile:
freqint.c:146:43: error: invalid operands to binary ^ (have ‘double’ and ‘double’)
Hl[p] = ((i % 2)*pow(2,Nr-1) + H1[p] ) ^ H2[p];
I want to do the XOR operation there, but I can't get it right.
I'm taking this from here; the interleaving sequence is at the bottom of page 107.
How do I write my code to avoid that error message and still do an XOR?
As was stated in the comments, the ^ operator can't be used on a double; however, you don't need to use double here.
You're using two functions which return a double, namely pow and floor.
Each time you call pow, you pass 2 for the base. Raising 2 to a power can be done much more efficiently by left shifting the value 1 by the exponent.
Similarly, each time you call floor you're dividing an integer value by 2. Since integer division automatically drops the remainder of division (assuming you're working only with positive values), the call to floor doesn't buy you anything.
So change all instances of pow(2,x) to (1 << x), and change all instances of floor(x/2) to (x/2). Then you can declare H1 and H2 as arrays of unsigned int and you will be able to use the XOR operator ^.
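A minimal, self-contained sketch of those substitutions (the Ri and Gk bit values are replaced by stand-in values here, since the full interleaver state is not reproduced):
#include <stdio.h>

int main(void)
{
    int Nr = 15;                             /* register width from the question */
    int i = 3;
    unsigned int H1 = 0, H2 = 0, Hl;

    for (int j = 0; j <= Nr - 2; j++)
        H1 += (unsigned int)(j % 2) << j;    /* stand-in bit times 2^j, no pow() */

    int indice = i / 2;                      /* integer division replaces floor(i/2) */
    H2 = (unsigned int)(indice % 2) << (Nr - 1);   /* stand-in for the Gk term */

    Hl = (((unsigned int)(i % 2) << (Nr - 1)) + H1) ^ H2;   /* plain integer XOR */
    printf("indice=%d H1=%u H2=%u Hl=%u\n", indice, H1, H2, Hl);
    return 0;
}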
I have an array with 16 elements. I would like to evaluate these to a boolean 0 or 1 and then store the result in 2 bytes so I can write it to a binary file. How do I do this?
Something like this you mean?
unsigned short binary = 0, i;
for ( i = 0; i < 16; ++i )
if ( array[i] )
binary |= 1 << i;
// the i-th bit of binary is 1 if array[i] is true and 0 otherwise.
You have to use bitwise operators.
Here's an example:
int firstBit = 0x1;
int secondBit = 0x2;
int thirdBit = 0x4;
int fourthBit = 0x8;
int x = firstBit | fourthBit; /*both the 1st and 4th bit are set */
int isFirstBitSet = x & firstBit; /* Check if at least the first bit is set */
int values[16];
int i;
unsigned short word = 0;
unsigned short bit = 1;
for (i = 0; i < 16; i++)
{
if (values[i])
{
word |= bit;
}
bit <<= 1;
}
This solution avoids the use of the if inside the loop:
unsigned short binary = 0, i;
for ( i = 0; i < 16; ++i )
binary |= (array[i] != 0) << i;
Declare a result array with two bytes, then loop through the source array:
unsigned char result[2] = { 0, 0 };
for (int i = 0; i < 16; i++) {
    // calculate index in result array
    int index = i >> 3;
    // shift value in result
    result[index] <<= 1;
    // check array value
    if (theArray[i]) {
        // true, so set lowest bit in result byte
        result[index]++;
    }
}
Something like this.
int values[16];
int bits = 0;
for (int ii = 0; ii < 16; ++ii)
{
bits |= (!!values[ii]) << ii;
}
unsigned short output = (unsigned short)bits;
The expression (!!values[ii]) forces the value to be 0 or 1. If you know for sure that the values array already contains only 0 or 1 and nothing else, you can leave off the !!.
You could also do this if you don't like the !! syntax:
int values[16];
int bits = 0;
for (int ii = 0; ii < 16; ++ii)
{
bits |= (values[ii] != 0) << ii;
}
unsigned short output = (unsigned short)bits;
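Once the 16 bits are packed into an unsigned short, writing them to a binary file is a single fwrite. A minimal sketch (the file name bits.bin is just an example; note that fwrite stores the two bytes in the host's native byte order, so swap them manually if the file format requires a specific endianness):
#include <stdio.h>

int main(void)
{
    int values[16] = { 1, 0, 1, 1, 0, 0, 0, 1,
                       0, 1, 0, 0, 1, 1, 1, 0 };
    unsigned short packed = 0;

    for (int i = 0; i < 16; i++)
        packed |= (values[i] != 0) << i;        /* bit i mirrors values[i] */

    FILE *f = fopen("bits.bin", "wb");
    if (f != NULL) {
        fwrite(&packed, sizeof packed, 1, f);   /* writes exactly 2 bytes */
        fclose(f);
    }
    return 0;
}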