Converting from decimal to base 4 using bitmasking - c

I'm trying to write a program that converts from decimal to base 4 using bit masking. First I wrote this program that converts from decimal to binary to understand how it works
#include <stdio.h>
void binary(unsigned int num) {
int i;
unsigned temp, mask=1;
mask<<=31;
for(i=0; i<32; i++) {
temp=num&mask;
printf("%d", temp>>(31-i));
mask>>=1;
}
}
int main () {
int n;
printf("Enter a number:\n");
scanf("%d", &n);
binary(n);
return 0;
}
Then I was trying to use the same approach here
void base4(unsigned int num) {
int i;
unsigned temp, mask=3;
mask<<=15;
for(i=0; i<16; i++) {
temp= // <<<==== ???
printf("%d", temp>>(15-i)*2);
mask>>=1;
}
I know that in in base 4, numbers use 2 bits. So if we are ANDing each bit of a given value lets say 22 with the corresponding bit of the mask=3 we will end up with something like this
.... 0 0 1 1 2 (base4)
.... 00 00 01 01 10 (bin)
I just couldn't use this information to apply it to my previous approach, would be great if someone could help.

char *toBase4(char *buff, unsigned val, int printZeroes)
{
char *wrk = buff;
int shift = sizeof(val) * CHAR_BIT - 2;
unsigned mask = 3 << shift;
do
{
if((val & mask) || printZeroes)
{
*wrk++ = ((val & mask) >> shift) + '0';
printZeroes = 1;
}
mask >>= 2;
}while((shift -= 2) >= 0);
*wrk = 0;
return buff;
}
int main(void)
{
char x[256];
printf("%s\n", toBase4(x, 0xff00, 0));
printf("%s\n", toBase4(x, 0xff00, 1));
}

void base4(unsigned int num) {
int i;
unsigned int temp, mask=3;
mask<<=30;
for(i=0; i<16; i++) {
temp=num&mask;
printf("%d", temp>>(15-i)*2);
mask>>=2;
}
}

Related

Why does the array contain values which I did not specify?

I'm trying to make a program which crosses binary numbers. The problem is with the cross function. It accepts two binary sequences and returns 5 sequences which are the result of crossing the arguments. Somewhy, the first of these sequences has a mess of values, and I cannot really solve this problem. Does anyone have any ideas?
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BINARY_LEN 5
#define POPULATION 5
// #define CROSS_BINARY_LIMIT 3
unsigned randrange(unsigned lower, unsigned upper)
{
return lower + rand() / (RAND_MAX / (upper - lower + 1) + 1);
}
unsigned char *int_to_bin(unsigned number)
{
unsigned char *binary = malloc(BINARY_LEN);
unsigned count = 0;
while (number > 0)
{
binary[count] = number % 2;
number /= 2;
count++;
}
return binary;
}
unsigned char **cross(unsigned char *parent_1, unsigned char *parent_2)
{
unsigned char **offspring = malloc(POPULATION);
unsigned cross_binary_point;
for (unsigned char i = 0; i < POPULATION; i++)
{
cross_binary_point = randrange(0, BINARY_LEN);
offspring[i] = malloc(BINARY_LEN);
for (unsigned char j = 0; j < BINARY_LEN; j++)
{
if (j < cross_binary_point)
{
offspring[i][j] = parent_1[j];
}
else
{
offspring[i][j] = parent_2[j];
}
}
}
return offspring;
}
int main(void)
{
unsigned char *x = int_to_bin(14);
unsigned char *y = int_to_bin(18);
for (unsigned char i = BINARY_LEN; i > 0; i--)
{
printf("%hhu", x[i - 1]);
}
printf("\n");
for (unsigned char i = BINARY_LEN; i > 0; i--)
{
printf("%hhu", y[i - 1]);
}
printf("\n\n");
unsigned char **ofspr = cross(x, y);
printf("%s\n", ofspr[0]); // Try to check out what's wrong with the first array
for (unsigned char i = 0; i < POPULATION; i++)
{
for (unsigned char j = BINARY_LEN; j > 0; j--)
{
printf("%hhu", ofspr[i][j]);
}
printf("\n");
}
free(ofspr);
free(x);
free(y);
}
The output is like this:
01110
10010
`w;
00059119
01011
01001
01111
01011
Maybe there is some memory conflict stuff, but I do not have any ideas
unsigned char **offspring = malloc(POPULATION);
only allocates 5 bytes, you want 5 pointers
should be
unsigned char **offspring = malloc(POPULATION * sizeof(char*));

Trouble printing decimal to binary number properly

I'm trying to write code to print the binary representation of a number. This was my attempt:
#include <stdio.h>
void getBinary(int num);
int main(void) {
int num = 2;
getBinary(num);
return 0;
}
void getBinary(int num) {
int mask = 1 << 31, i;
int temp;
for (i = 31; i >= 0; i--) {
temp = num & mask;
if (temp > 0)
printf("1");
else
printf("0");
mask = mask >> 1;
}
printf("\n");
}
And this doesn't work. If i make num = 2 my output will be ...0011. This is the answer I was given:
void getBinary(int);
int main()
{
int num=0;
printf("Enter an integer number :");
scanf("%d",&num);
printf("\nBinary value of %d is =",num);
getBinary(num);
return 0;
}
/*Function definition : getBinary()*/
void getBinary(int n)
{
int loop;
/*loop=15 , for 16 bits value, 15th bit to 0th bit*/
for(loop=15; loop>=0; loop--)
{
if( (1 << loop) & n)
printf("1");
else
printf("0");
}
}
and this will give the correct output. Why? What is different between my code and this. Aren't they doing the same thing with the solution doing it in fewer steps?
#include <stdio.h>
void getBinary(int num);
int main(void) {
unsigned int num = 2;
getBinary(num);
return 0;
}
void getBinary(int num) {
unsigned int mask = 1 << 31;
int i;
unsigned int temp;
for (i = 31; i >= 0; i--) {
temp = num & mask;
if (temp > 0)
printf("1");
else
printf("0");
mask = mask >> 1;
}
printf("\n");
}
For those curious, this is the correct answer. Just make the mask unsigned.
You have some problems in the code:
1 << 31 invokes undefined behavior because of signed arithmetic overflow. On your platform it most likely produces the value INT_MIN, that has the expected bit configuration but is negative.
temp > 0 might fail incorrectly if num < 0, because the sign bit will make temp negative too.
shifting mask to the right when its value is negative has an implementation defined result: On your platform it duplicates the sign bit, therefore mask will not have a single bit as expected, but all bits set above the current one. This explains the observed behavior.
You should use type unsigned int in function getBinary for num, temp and mask to get correct behavior.
Here is a corrected version of your code:
#include <stdio.h>
void getBinary(unsigned int num);
int main(void) {
unsigned int num = 2;
getBinary(num);
return 0;
}
void getBinary(unsigned int num) {
unsigned int mask = 1U << 31;
unsigned int temp;
for (int i = 31; i >= 0; i--) {
temp = num & mask;
if (temp != 0)
printf("1");
else
printf("0");
mask = mask >> 1;
}
printf("\n");
}
The proposed solution only prints 16 bits, which may or may not have been the specification. If the int type is larger than 16 bits, the bit shift will not overflow and everything is fine. If int has 16 bits, 1 << 15 invokes undefined behavior, so the solution is not strictly correct, but will function on most current platforms.
Here is a simpler solution:
void getBinary(unsigned int num) {
for (int shift = 32; shift-- > 0;) {
putchar('0' + ((num >> shift) & 1));
}
putchar("\n");
}
Initialize shift to 16 to print only the low order 16 bits.

How to rotate bits to the left and add rotated bits to the right

I'm trying to get my program to work, where I shift bits to the left and add the shifted bits to the right. For example 00111000, if you shift it 4 positions to the left, the outcome should be 10000011. How can I make this work, I know that I need to use the bitwise OR. I have added the main function below.
#include <stdio.h>
#include <stdlib.h>
void printbits(int b){
int i;
int s = 8 * (sizeof b) - 1; /* 31 if int is 32 bits */
for(i=s;i>=0;i--)
putchar( b & 1<<i ? '1' : '0');
}
int main(){
char dir; /* L=left R=right */
int val, n, i;
scanf("%d %d %c",&val, &n, &dir);
printbits(val);putchar('\n');
for (i=0; i<10; i++){
if (dir=='L' || dir =='l')
rotateLeft(&val, n);
else
rotateRight(&val,n);
printbits(val); putchar('\n');
}
return;
}
This is the rotateLeft en rotateRight function.
#include <stdio.h>
#include <stdlib.h>
void rotateLeft(int *val, int N){
int num = val[0];
int pos = N;
int result = num << pos;
}
void rotateRight(int *val, int N){
int num = val[0];
int pos = N;
int result = num >> pos;
}
Here is a tested and non-optimized solution to complete your source code:
void rotateLeft(int *val, int N){
unsigned int num = val[0];
int pos = N;
unsigned int part1 = num << pos;
unsigned int part2 = (num >> ((sizeof(val[0])*CHAR_BIT)-pos));
if (N != 0) {
val[0] = part1 | part2;
}
}
void rotateRight(int *val, int N){
unsigned int num = val[0];
int pos = N;
unsigned int part1 = num >> pos;
unsigned int part2 = (num << ((sizeof(val[0])*CHAR_BIT)-pos));
if (N != 0) {
val[0] = part1 | part2;
}
}
To prevent automatic carry during the shift right, you have to
consider value as unsigned int.
To prevent N = 0 interference, assign the result to the entry only when (N != 0). (See remark on post ROL / ROR on variable using inline assembly in Objective-C)
MSB = (n >> (NUM_OF_BITS_IN_INT - 1))
n = (n << 1) | MSB;
Left bit rotation of n by 1 bit.
You are just shrugging off the MSB but not adding it back at LSB position.
use the functions in this link for performing the rotations:
https://en.wikipedia.org/wiki/Circular_shift

How to print binary from an integer

#include <stdio.h>
#include <stdlib.h>
void main()
{
int testNums[] = {3, 0x12, 0xFF, -3};
int testBits[] = {9, 7, 12, 15};
int i;
for (i = 0; i < sizeof(testNums) / sizeof(testNums[0]); i++)
printBin(testNums[i], testBits[i]);
}
void printBin(int num, int bits)
{
int pow;
int mask = 1 << bits - 1;
for(pow=0; pow<bits; pow++)
{
if(mask & num)
printf("1");
else
printf("0");
num<<1;
}
printf("\n");
}
It doesn't print out the correct binary number, but has the right amount of bits, and advice on how I can fix this?
Your problem is here, in printBin:
num<<1;
This statement does nothing. What you want is:
num<<=1;
For clarity, you should also use parenthesis here:
int mask = 1 << (bits - 1);
And you should move printBin above main so the definition is visible at the time the function is called.
Finally, main should always return int.
There are several issues in your code:
You need a prototype for printBin().
Use int main(void) or int main(int argc, char** argv) instead of void main().
num<<1; is an expression that has no side effects. Perhaps you are able infer this from compiler's warning. Write num<<=1; to make a difference.
Only a suggestion: Why not replace the if statement with printf("%d", !!(mask & num));?
Here is the fixed code that compiles without warnings using clang:
#include <stdio.h>
#include <stdlib.h>
void printBin(int num, int bits);
int main(void)
{
int testNums[] = {3, 0x12, 0xFF, -3};
int testBits[] = {9, 7, 12, 15};
int i;
for (i = 0; i < sizeof testNums / sizeof testNums[0]; i++)
printBin(testNums[i], testBits[i]);
return 0;
}
void printBin(int num, int bits)
{
int pow;
int mask = 1 << (bits - 1);
for(pow=0; pow<bits; pow++)
{
printf("%d", mask & num);
num <<= 1;
}
putchar('\n');
}
Output:
000000011
0010010
000011111111
111111111111101
Although your approach is conventional and #dbush's answer is appropriate, I would like like to mention another approach. You may know this and it doesn't handle the negative values very well but in this approach, you can convert number of any base into other very easily
#include <stdio.h>
#include <string.h>
#define RESOLUTION 128
void compute(int number, int base, char* placeHolder){
int k = 0;
int i, j;
char temp;
while(number){
placeHolder[k++] = "0123456789ABCDEF"[number % base];
number /= base;
}
if (k != 0) {
placeHolder[k] = '\0';
for(i = 0, j = strlen(placeHolder) - 1; i < j; i++, j--){
temp = placeHolder[i];
placeHolder[i] = placeHolder[j];
placeHolder[j] = temp;
}
}else{
placeHolder[0] = '0';
placeHolder[1] = '\0';
}
}
int main(void) {
char bin[RESOLUTION];
compute(0xFF, 10, bin);
printf("%s\n", bin);
return 0;
}
Change the RESOLUTION value at you will.
More specific to binary, you can print binary of any datatype like this
void printBits(size_t const size, void const * const ptr)
{
unsigned char *b = (unsigned char*) ptr;
unsigned char byte;
int i, j;
for (i=size-1;i>=0;i--)
{
for (j=7;j>=0;j--)
{
byte = b[i] & (1<<j);
byte >>= j;
printf("%u", byte);
}
}
puts("");
}
int main()
{
int i = 23;
uint ui = UINT_MAX;
float f = 23.45f;
printBits(sizeof(i), &i);
printBits(sizeof(ui), &ui);
printBits(sizeof(f), &f);
return 0;
}

c; converting 2 bytes to short and vice versa

I want to convert array of bytes bytes1 (little endian), 2 by 2, into an array of short integers, and vice versa . I expect to get final array bytes2, equal to initial array bytes1. I have code like this:
int i = 0;
int j = 0;
char *bytes1;
char *bytes2;
short *short_ints;
bytes1 = (char *) malloc( 2048 );
bytes2 = (char *) malloc( 2048 );
short_ints = (short *) malloc( 2048 );
for ( i=0; i<2048; i+=2)
{
short_ints[j] = bytes1[i+1] << 8 | bytes1[i] ;
j++;
}
j = 0;
for ( i=0; i<2048; i+=2)
{
bytes2[i+1] = (short_ints[j] >> 8) & 0xff;
bytes2[i] = (short_ints[j]) ;
j++;
}
j = 0;
Now, can someone tell me why I haven't got bytes2 array, completely the same as bytes1 ? And how to do this properly?
Suggest 2 functions. Do all combining and extraction as unsigned to remove issues with the sign bit in short and maybe char.
The sign bit is OP's code biggest problem. short_ints[j] = bytes1[i+1] << 8 | bytes1[i] ; likely does a sign extend with bytes1[i] conversion to int.
Also (short_ints[j] >> 8) does a sign extend.
// Combine every 2 char (little endian) into 1 short
void charpair_short(short *dest, const char *src, size_t n) {
const unsigned char *usrc = (const unsigned char *) src;
unsigned short *udest = (unsigned short *) dest;
if (n % 2) Handle_OddError();
n /= 2;
while (n-- > 0) {
*udest = *usrc++;
*udest += *usrc++ * 256u;
udest++;
}
}
// Break every short into 2 char (little endian)
void short_charpair(char *dest, const short *src, size_t n) {
const unsigned short *usrc = (const unsigned short *) src;
unsigned char *udest = (unsigned char *) dest;
if (n % 2) Handle_OddError();
n /= 2;
while (n-- > 0) {
*udest++ = (unsigned char) (*usrc);
*udest++ = (unsigned char) (*usrc / 256u);
usrc++;
}
}
int main(void) {
size_t n = 2048; // size_t rather than int has advantages for array index
// Suggest code style: type *var = malloc(sizeof(*var) * N);
// No casting of return
// Use sizeof() with target pointer name rather than target type.
char *bytes1 = malloc(sizeof * bytes1 * n);
Initialize(bytes, n); //TBD code for OP-best to not work w/uninitialized data
// short_ints = (short *) malloc( 2048 );
// This is weak as `sizeof(short)!=2` is possible
short *short_ints = malloc(sizeof * short_ints * n/2);
charpair_short(short_ints, bytes1, n);
char *bytes2 = malloc(sizeof * bytes2 * n);
short_charpair(bytes2, short_ints, n);
compare(bytes1, bytes2, n); // TBD code for OP
// epilogue
free(bytes1);
free(short_ints);
free(bytes2);
return 0;
}
Avoided the union approach as that is platform endian dependent.
Here's a program that demonstrates that you are experiencing the problem associated with bit-shifting signed integral values.
#include <stdio.h>
#include <stdlib.h>
void testCore(char bytes1[],
char bytes2[],
short short_ints[],
int size)
{
int i = 0;
int j = 0;
for ( i=0; i<size; i+=2)
{
short_ints[j] = bytes1[i+1] << 8 | bytes1[i] ;
j++;
}
j = 0;
for ( i=0; i<size; i+=2)
{
bytes2[i+1] = (short_ints[j] >> 8) & 0xff;
bytes2[i] = (short_ints[j]) ;
j++;
}
for ( i=0; i<size; ++i)
{
if ( bytes1[i] != bytes2[i] )
{
printf("%d-th element is not equal\n", i);
}
}
}
void test1()
{
char bytes1[4] = {-10, 0, 0, 0};
char bytes2[4];
short short_ints[2];
testCore(bytes1, bytes2, short_ints, 4);
}
void test2()
{
char bytes1[4] = {10, 0, 0, 0};
char bytes2[4];
short short_ints[2];
testCore(bytes1, bytes2, short_ints, 4);
}
int main()
{
printf("Calling test1 ...\n");
test1();
printf("Done\n");
printf("Calling test2 ...\n");
test2();
printf("Done\n");
return 0;
}
Output of the program:
Calling test1 ...
1-th element is not equal
Done
Calling test2 ...
Done
Udate
Here's a version of testCore that works for me:
void testCore(char bytes1[],
char bytes2[],
short short_ints[],
int size)
{
int i = 0;
int j = 0;
unsigned char c1;
unsigned char c2;
unsigned short s;
for ( i=0; i<size; i+=2)
{
c1 = bytes1[i];
c2 = bytes1[i+1];
short_ints[j] = (c2 << 8) | c1;
j++;
}
j = 0;
for ( i=0; i<size; i+=2)
{
s = short_ints[j];
s = s >> 8;
bytes2[i+1] = s;
bytes2[i] = short_ints[j] & 0xff;
j++;
}
for ( i=0; i<size; ++i)
{
if ( bytes1[i] != bytes2[i] )
{
printf("%d-th element is not equal\n", i);
}
}
}
It is tested with:
char bytes1[4] = {-10, 0, 25, -4};
and
char bytes1[4] = {10, -2, 25, 4};
Well, what you need is a UNION:
#include <stdio.h>
#include <string.h>
union MyShort {
short short_value;
struct {
char byte1;
char byte2;
};
};
int main(int argc, const char * argv[])
{
char a[4]="abcd";
char b[4]="1234";
short c[5]; c[4]=0;
union MyShort d;
for (int i = 0; i<4; i++) {
d.byte1 = a[i];
d.byte2 = b[i];
c[i] = d.short_value;
}//next i
printf("%s\n", (char*)c);
return 0;
}
the result should be a1b2c3d4.

Resources