I can use the strtol function to turn a base-36 value (stored as a string) into a long int:
long int val = strtol("ABCZX123", 0, 36);
Is there a standard function that allows the inversion of this? That is, to convert a long int val variable back into a base-36 string, obtaining "ABCZX123" again?
There's no standard function for this. You'll need to write your own.
Usage example: https://godbolt.org/z/MhRcNA
const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
char *reverse(char *str)
{
char *end = str;
char *start = str;
if(!str || !*str) return str;
while(*(end + 1)) end++;
while(end > start)
{
int ch = *end;
*end-- = *start;
*start++ = ch;
}
return str;
}
char *tostring(char *buff, long long num, int base)
{
int sign = num < 0;
char *savedbuff = buff;
if(base < 2 || base >= sizeof(digits)) return NULL;
if(buff)
{
do
{
*buff++ = digits[abs(num % base)];
num /= base;
}while(num);
if(sign)
{
*buff++ = '-';
}
*buff = 0;
reverse(savedbuff);
}
return savedbuff;
}
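A minimal usage sketch for the functions above (the buffer size of 66 is an assumption: enough for 64 base-2 digits, a sign and the terminating '\0'):
#include <stdio.h>
#include <stdlib.h>
int main(void) {
    char buf[66]; /* assumption: worst case is base 2 for a 64-bit value */
    long long v = strtoll("ABCZX123", 0, 36);
    puts(tostring(buf, v, 36)); /* prints ABCZX123 */
    puts(tostring(buf, v, 10)); /* the same value in decimal */
    return 0;
}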
One missing aspect of this "convert long integer to base-36 string" task is buffer management. The prototype below suffers from a potential buffer overflow when destination is too small.
char *long_to_string(char *destination, long num, int base);
(Assuming 32-bit long) Consider the overflow below: the resulting string should be "-10000000000000000000000000000000", which needs 34 bytes to encode.
char buffer[33]; // Too small
long_to_string(buffer, LONG_MIN, 2); // Oops!
An alternative would pass in the buffer size and then provide some sort of error signaling when the buffer is too small.
char* longtostr(char *dest, size_t size, long a, int base)
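A sketch of how that size-checked variant might look (this implementation is an assumption, not part of the original answer; it reports failure by returning NULL when dest is too small):
#include <limits.h>
#include <string.h>
char *longtostr(char *dest, size_t size, long a, int base) {
    if (dest == NULL || base < 2 || base > 36) return NULL;
    char tmp[sizeof(long) * CHAR_BIT + 2];        /* worst case: base-2 digits + sign + '\0' */
    char *s = &tmp[sizeof tmp - 1];
    *s = '\0';
    long i = a < 0 ? a : -a;                      /* work on the negative side to handle LONG_MIN */
    do {
        *--s = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"[-(i % base)];
        i /= base;
    } while (i);
    if (a < 0) *--s = '-';
    size_t used = (size_t)(&tmp[sizeof tmp] - s); /* length including the '\0' */
    if (used > size) return NULL;                 /* error signal: buffer too small */
    return memcpy(dest, s, used);
}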
Since C99, code could instead use a compound literal to provide the needed space, without the calling code having to compute the needed size or explicitly allocate a buffer.
The returned string pointer from TO_BASE(long x, int base) is valid until the end of the block.
#include <assert.h>
#include <limits.h>
#define TO_BASE_N (sizeof(long)*CHAR_BIT + 2)
// v. compound literal .v
#define TO_BASE(x, b) my_to_base((char [TO_BASE_N]){""}, (x), (b))
char *my_to_base(char *buf, long a, int base) {
assert(base >= 2 && base <= 36);
long i = a < 0 ? a : -a; // use the negative side - this handles _MIN and _MAX nicely
char *s = &buf[TO_BASE_N - 1];
*s = '\0';
do {
s--;
*s = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"[-(i % base)];
i /= base;
} while (i);
if (a < 0) {
s--;
*s = '-';
}
// Could add memmove here to move the used buffer to the beginning (see the sketch after this example)
return s;
}
#include <limits.h>
#include <stdio.h>
int main(void) {
long ip1 = 0x01020304;
long ip2 = 0x05060708;
long ip3 = LONG_MIN;
printf("%s %s\n", TO_BASE(ip1, 16), TO_BASE(ip2, 16), TO_BASE(ip3, 16));
printf("%s %s\n", TO_BASE(ip1, 2), TO_BASE(ip2, 2), TO_BASE(ip3, 2));
puts(TO_BASE(ip1, 8));
puts(TO_BASE(ip1, 36));
puts(TO_BASE(ip3, 10));
}
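For reference, the memmove mentioned in the comment inside my_to_base could look like the wrapper below (a sketch, not part of the original answer); it slides the used tail of the buffer to the front so the returned pointer equals buf:
#include <string.h>
char *my_to_base_moved(char *buf, long a, int base) {
    char *s = my_to_base(buf, a, base);
    size_t used = strlen(s) + 1;   /* include the '\0' */
    memmove(buf, s, used);         /* move the digits to the beginning of buf */
    return buf;
}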
Here is another option with no need for a source array of characters, but it is less portable since not all character encodings have contiguous alphabetic characters (EBCDIC, for example).
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <limits.h>
char get_chars(long long value)
{
if (value >= 0 && value <= 9)
return value + '0';
else
return value - 10 + 'A';
}
void reverse_string(char *str)
{
int len = strlen(str);
for (int i = 0; i < len/2; i++)
{
char temp = str[i];
str[i] = str[len - i - 1];
str[len - i - 1] = temp;
}
}
char* convert_to_base(char *res, int base, long long input)
{
bool flag = 0;
int index = 0;
if(input < 0){
input = llabs(input);
flag = 1;
}
else if(input == 0){
res[index++] = '0';
res[index] = '\0';
return res;
}
while(input > 0)
{
res[index++] = get_chars(input % base);
input /= base;
}
if(flag){
res[index++] = '-';
}
res[index] = '\0';
reverse_string(res);
return res;
}
int main() {
long long input = 0;
printf("** Integer to Base-36 **\n ");
printf("Enter a valid number: ");
scanf("%lld", &input);
if(input == LLONG_MAX || input == LLONG_MIN){ /* reject the extremes; llabs(LLONG_MIN) would overflow */
printf("Invalid number");
return 0;
}
int base = 36;
char res[100];
printf("%lld -> %s\n", input, convert_to_base(res, base, input));
return 0;
}
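For example (digits worked out by hand), entering 123456789 should produce the line: 123456789 -> 21I3V9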
I'm trying to convert an integer to a binary string (see code below). I've already looked at several similar code snippets and can't seem to find the reason why this doesn't work. Not only does it fail to produce the correct output, it produces no output at all. Can somebody please explain to me in detail what I'm doing wrong?
#include <stdio.h>
#include <stdlib.h>
char* toBinaryString(int n) {
char *string = malloc(sizeof(int) * 8 + 1);
if (!string) {
return NULL;
}
for (int i = 31; i >= 0; i--) {
string[i] = n & 1;
n >> 1;
}
return string;
}
int main() {
char* string = toBinaryString(4);
printf("%s", string);
free(string);
return 0;
}
The line
string[i] = n & 1;
is assigning the integers 0 or 1 to string[i]. These are different from the characters '0' and '1' (on ASCII systems, '0' is 48 and '1' is 49). You should add '0' to convert the integers to the corresponding characters.
Also, as #EugeneSh. pointed out, the line
n >> 1;
has no effect. It should be
n >>= 1;
to update the n's value.
Also, as #JohnnyMopp pointed out, you should terminate the string by adding a null-character.
One more point is that you should check whether malloc() succeeded. (This is done in the function toBinaryString, but there is no check in main() before printing its result.)
Finally, it doesn't look good to use the magic number 31 to initialize the for loop while using sizeof(int) for the size passed to malloc().
Fixed code:
#include <stdio.h>
#include <stdlib.h>
char* toBinaryString(int n) {
int num_bits = sizeof(int) * 8;
char *string = malloc(num_bits + 1);
if (!string) {
return NULL;
}
for (int i = num_bits - 1; i >= 0; i--) {
string[i] = (n & 1) + '0';
n >>= 1;
}
string[num_bits] = '\0';
return string;
}
int main() {
char* string = toBinaryString(4);
if (string) {
printf("%s", string);
free(string);
} else {
fputs("toBinaryString() failed\n", stderr);
}
return 0;
}
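Assuming a 32-bit int, the program above prints 00000000000000000000000000000100 (twenty-nine zeros followed by 100) for the input 4.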
The values you are putting into the string are the byte values 0 and 1, when what you want is the characters '0' and '1'. Try string[i] = (n & 1) + '0';. Byte values 0 and 1 are non-printing characters, which is why you get no output.
#define INT_WIDTH 32
#define TEST 1
char *IntToBin(unsigned x, char *buffer) {
char *ptr = buffer + INT_WIDTH;
do {
*(--ptr) = '0' + (x & 1);
x >>= 1;
} while(x);
return ptr;
}
#if TEST
#include <stdio.h>
int main() {
int n;
char str[INT_WIDTH+1]; str[INT_WIDTH]='\0';
while(scanf("%d", &n) == 1)
puts(IntToBin(n, str));
return 0;
}
#endif
I have a character representation of a binary number, and I wish to perform arithmetic, plus 1, on it. I want to keep the padding of 0.
Right now I have :
int value = fromBinary(binaryCharArray);
value++;
int fromBinary(char *s) {
return (int)strtol(s, NULL, 2);
}
I need to transform value++ back into a binary representation, and if there are leading zeros to pad, I need to keep them.
0110 -> 6
6++ -> 7
7 -> 0111 <- that's what I should get when transforming it back into a character representation
In my problem it will never go above 15.
This is what I have so far
char *toBinary(int value)
{
char *binaryRep = malloc(4 * sizeof(char));
itoa(value, binaryRep, 2);
if (strlen(binaryRep) < 4)
{
int index = 0;
while (binaryRep[index] != '1')
{
binaryRep[index] = '0';
index++;
}
}
return binaryRep;
}
Try this
#include <stdio.h>
int main(void)
{
unsigned int x;
char binary[5]; /* You need 5 bytes for a 4 character string */
x = 6;
for (size_t n = 0 ; n < 4 ; ++n)
{
/* shift right `n' bits and check that the bit is set */
binary[3 - n] = (((x >> n) & 1) == 1) ? '1' : '0';
}
/* nul terminate `binary' so it's a valid c string */
binary[4] = '\0';
fprintf(stderr, "%s\n", binary);
return 0;
}
char *binaryRep = malloc(5);  /* 4 digits plus the terminating '\0' */
binaryRep[4] = '\0';
for (int i = 3; i >= 0; i--) {
    binaryRep[3 - i] = (value & (1 << i)) ? '1' : '0';  /* MSB first, so 7 gives "0111" */
}
return binaryRep;
This does what I need.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
/*___________________________________________________________
*/
int from_bin(char *buff){
int d=0;
while(*buff){
d<<=1;
d+=(*buff=='1')?1:0;
buff++;
}
return d;
}
/*___________________________________________________________
*/
int to_bin(int d,char *buff,int len){
int ret=0;
if(len<5)return -1; /* need 4 digits plus the trailing '\0' for this group */
if(d & ~0xf){
ret=to_bin(d>>4,buff,len-4);
if(ret==-1) return -1;
buff+=ret;
}
buff[4]=0;
buff[3]=((d & 0x1)?'1':'0');
d>>=1;
buff[2]=((d & 0x1)?'1':'0');
d>>=1;
buff[1]=((d & 0x1)?'1':'0');
d>>=1;
buff[0]=((d & 0x1)?'1':'0');
d>>=1;
return ret+4;
}
/*___________________________________________________________
*/
int main(void){
int n;
char buff[33]="0011";
n=from_bin(buff);
n+=1;
if(to_bin(n,buff,8)==-1){
printf("ERROR: buffer too small\n");
}else{
printf("bin of %d= '%s'\n",n,buff);
}
return 0;
}
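With the values above (buff starts as "0011", so n becomes 4), this prints: bin of 4= '0100'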
During left shift or right shift of a "decimal number", I want the result in binary format and not in decimal. I know how to convert decimal to binary. Is there any other way to capture intermediate binary values?
Here's how to print in hexadecimal, which you can pretty easily translate to binary in your head.
printf("0x%x\n", my_int);
Here's code to print a number in a binary format:
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
int main(int argc, char **argv)
{
char buf[65] = { 0 }, *endptr;
long my_int;
unsigned long tmp_int;
int i, j;
if (argc != 2 || (errno = 0, (my_int = strtol(argv[1], &endptr, 0)), errno) || *endptr)
exit((fprintf(stderr, "Usage: %s <number>\n", argv[0]), 1));
/* break my_int down into a binary character representation */
tmp_int = my_int;
i = 0;
do
{
buf[i++] = ((tmp_int & 0x1) ? '1' : '0');
tmp_int >>= 1;
}
while (0 != tmp_int);
/* reverse buf for printing */
for (j = 0, --i; j < i; ++j, --i)
{
char c = buf[j];
buf[j] = buf[i];
buf[i] = c;
}
printf("%s\n", buf);
return 0;
}
I'm looking for a function to allow me to print the binary representation of an int. What I have so far is;
char *int2bin(int a)
{
char *str,*tmp;
int cnt = 31;
str = (char *) malloc(33); /*32 + 1 , because its a 32 bit bin number*/
tmp = str;
while ( cnt > -1 ){
str[cnt]= '0';
cnt --;
}
cnt = 31;
while (a > 0){
if (a%2==1){
str[cnt] = '1';
}
cnt--;
a = a/2 ;
}
return tmp;
}
But when I call
printf("a %s",int2bin(aMask)) // aMask = 0xFF000000
I get output like;
0000000000000000000000000000000000xtpYy (And a bunch of unknown characters.
Is it a flaw in the function or am I printing the address of the character array or something? Sorry, I just can't see where I'm going wrong.
NB The code is from here
EDIT: It's not homework FYI, I'm trying to debug someone else's image manipulation routines in an unfamiliar language. If however it's been tagged as homework because it's an elementary concept then fair play.
Here's another option that is more optimized where you pass in your allocated buffer. Make sure it's the correct size.
// buffer must have length >= sizeof(int) * 8 + 1
// Write to the buffer backwards so that the binary representation
// is in the correct order i.e. the LSB is on the far right
// instead of the far left of the printed string
char *int2bin(int a, char *buffer, int buf_size) {
buffer += (buf_size - 1);
for (int i = 31; i >= 0; i--) {
*buffer-- = (a & 1) + '0';
a >>= 1;
}
return buffer + 1; // after the loop, buffer points one before the digits
}
#define BUF_SIZE 33
int main() {
char buffer[BUF_SIZE];
buffer[BUF_SIZE - 1] = '\0';
int2bin(0xFF000000, buffer, BUF_SIZE - 1);
printf("a = %s", buffer);
}
A few suggestions:
null-terminate your string
don't use magic numbers
check the return value of malloc()
don't cast the return value of malloc()
use binary operations instead of arithmetic ones as you're interested in the binary representation
there's no need for looping twice
Here's the code:
#include <stdlib.h>
#include <limits.h>
char * int2bin(int i)
{
size_t bits = sizeof(int) * CHAR_BIT;
char * str = malloc(bits + 1);
if(!str) return NULL;
str[bits] = 0;
// type punning because signed shift is implementation-defined
unsigned u = *(unsigned *)&i;
for(; bits--; u >>= 1)
str[bits] = u & 1 ? '1' : '0';
return str;
}
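A possible usage sketch for this version (the caller owns the returned buffer and must free it; the example value is the aMask from the question):
#include <stdio.h>
#include <stdlib.h>
int main(void) {
    char *s = int2bin(0xFF000000);
    if (s) {
        printf("a %s\n", s); /* 11111111 followed by 24 zeros on a 32-bit int */
        free(s);
    }
    return 0;
}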
Your string isn't null-terminated. Make sure you add a '\0' character at the end of the string; or, you could allocate it with calloc instead of malloc, which will zero the memory that is returned to you.
By the way, there are other problems with this code:
As used, it allocates memory when you call it, leaving the caller responsible for free()ing the allocated string. You'll leak memory if you just call it in a printf call.
It makes two passes over the number, which is unnecessary. You can do everything in one loop.
Here's an alternative implementation you could use.
#include <stdlib.h>
#include <limits.h>
char *int2bin(unsigned n, char *buf)
{
#define BITS (sizeof(n) * CHAR_BIT)
static char static_buf[BITS + 1];
int i;
if (buf == NULL)
buf = static_buf;
for (i = BITS - 1; i >= 0; --i) {
buf[i] = (n & 1) ? '1' : '0';
n >>= 1;
}
buf[BITS] = '\0';
return buf;
#undef BITS
}
Usage:
printf("%s\n", int2bin(0xFF00000000, NULL));
The second parameter is a pointer to a buffer you want to store the result string in. If you don't have a buffer you can pass NULL and int2bin will write to a static buffer and return that to you. The advantage of this over the original implementation is that the caller doesn't have to worry about free()ing the string that gets returned.
A downside is that there's only one static buffer so subsequent calls will overwrite the results from previous calls. You couldn't save the results from multiple calls for later use. Also, it is not threadsafe, meaning if you call the function this way from different threads they could clobber each other's strings. If that's a possibility you'll need to pass in your own buffer instead of passing NULL, like so:
char str[33];
int2bin(0xDEADBEEF, str);
puts(str);
Here is a simple algorithm.
void decimalToBinary(int num) {
    // Initialize mask with only the most significant bit set (assumes a 32-bit int)
    unsigned int mask = 0x80000000;
    size_t bits = sizeof(num) * CHAR_BIT;   // needs <limits.h>
    for (size_t count = 0; count < bits; count++) {
        // print the bit selected by the mask
        putchar((mask & num) ? '1' : '0');
        // shift the mask one position to the right
        mask >>= 1;
    }
}
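A small usage sketch (assuming the putchar-based version above):
#include <limits.h> /* CHAR_BIT */
#include <stdio.h>
int main(void) {
    decimalToBinary(5); /* prints 5 as 32 bits: twenty-nine zeros followed by 101 */
    putchar('\n');
    return 0;
}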
This is what I made to display an integer as a binary code, separated in groups of 4 bits:
int getal = 32; /* Used to determine the value of a bit 2^i; integers are 32 bits long */
int binairy[getal]; /* An integer array to put the bits in */
int i; /* Used in the for loop */
for (i = 0; i < 32; i++)
{
    binairy[i] = (integer >> ((getal - i) - 1)) & 1;
}
int a, counter = 0;
for (a = 0; a < 32; a++)
{
    if (counter == 4)
    {
        counter = 0;
        printf(" ");
    }
    printf("%i", binairy[a]);
    counter++;
}
It could be a bit long, but I always write it in a way that (I hope) everyone can understand what is going on. Hope this helped.
#include<stdio.h>
//#include<conio.h> // use this if you are running your code in visual c++, linux don't
// have this library. i have used it for getch() to hold the screen for input char.
void showbits(int);
int main()
{
int no;
printf("\nEnter number to convert in binary\n");
scanf("%d",&no);
showbits(no);
// getch(); // used to hold screen...
// keep code as it is if using gcc. if using windows uncomment #include & getch()
return 0;
}
void showbits(int n)
{
int i,k,andmask;
for(i=15;i>=0;i--)
{
andmask = 1 << i;
k = n & andmask;
k == 0 ? printf("0") : printf("1");
}
}
Just an enhancement of the answer from @Adam Markowitz, to let the function support uint8, uint16, uint32 and uint64 widths:
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
// Convert an integer number to its binary representation.
// The buffer must be at least `bits` + 1 bytes long; the caller sets
// the terminating '\0' at buffer[bits].
void int2bin(uint64_t number, char *buffer, int bits) {
    memset(buffer, '0', bits);
    buffer += bits - 1;
    for (int i = bits - 1; i >= 0; i--) {
        *buffer-- = (number & 1) + '0';
        number >>= 1;
    }
}
int main(int argc, char *argv[]) {
char buffer[65];
buffer[8] = '\0';
int2bin(1234567890123, buffer, 8);
printf("1234567890123 in 8 bits: %s\n", buffer);
buffer[16] = '\0';
int2bin(1234567890123, buffer, 16);
printf("1234567890123 in 16 bits: %s\n", buffer);
buffer[32] = '\0';
int2bin(1234567890123, buffer, 32);
printf("1234567890123 in 32 bits: %s\n", buffer);
buffer[64] = '\0';
int2bin(1234567890123, buffer, 64);
printf("1234567890123 in 64 bits: %s\n", buffer);
return 0;
}
The output:
1234567890123 in 8 bits: 11001011
1234567890123 in 16 bits: 0000010011001011
1234567890123 in 32 bits: 01110001111110110000010011001011
1234567890123 in 64 bits: 0000000000000000000000010001111101110001111110110000010011001011
Two things:
Where do you put the NUL character? I can't see a place where '\0' is set.
Int is signed, and 0xFF000000 would be interpreted as a negative value. So while (a > 0) will be false immediately.
Aside: The malloc function inside is ugly. What about providing a buffer to int2bin?
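A sketch of that suggestion (the signature and names here are illustrative, not from the original post):
#include <stdio.h>
/* Caller supplies a buffer of at least 33 bytes for a 32-bit value. */
char *int2bin_buf(unsigned a, char buf[33]) {
    buf[32] = '\0';
    for (int i = 31; i >= 0; i--) {
        buf[i] = (a & 1u) + '0'; /* LSB ends up at buf[31], MSB at buf[0] */
        a >>= 1;
    }
    return buf;
}
int main(void) {
    char buf[33];
    puts(int2bin_buf(0xFF000000u, buf)); /* 11111111 followed by 24 zeros */
    return 0;
}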
A couple of things:
int f = 32;
unsigned i = 1;
do {
    str[--f] = (a & i) ? '1' : '0';
} while (i <<= 1);
It's highly platform dependent, but maybe the idea above gets you started (a self-contained sketch follows after these points).
Why not use memset(str, 0, 33) to set
the whole char array to 0?
Don't forget to free() the char array after your function call!
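Put together, a self-contained sketch of that idea might look like this (the surrounding declarations are assumptions, since the original fragment only shows the loop):
#include <stdio.h>
#include <string.h>
int main(void) {
    unsigned a = 0xFF000000u;    /* value from the question */
    char str[33];
    memset(str, 0, 33);          /* as suggested: zero the array, which also terminates the string */
    int f = 32;
    unsigned i = 1;
    do {
        str[--f] = (a & i) ? '1' : '0';
    } while (i <<= 1);
    puts(str);                   /* 11111111 followed by 24 zeros */
    return 0;
}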
Two simple versions coded here (reproduced with mild reformatting).
#include <stdio.h>
/* Print n as a binary number */
void printbitssimple(int n)
{
unsigned int i;
i = 1<<(sizeof(n) * 8 - 1);
while (i > 0)
{
if (n & i)
printf("1");
else
printf("0");
i >>= 1;
}
}
/* Print n as a binary number */
void printbits(int n)
{
unsigned int i, step;
if (0 == n) /* For simplicity's sake, I treat 0 as a special case*/
{
printf("0000");
return;
}
i = 1<<(sizeof(n) * 8 - 1);
step = -1; /* Only print the relevant digits */
step >>= 4; /* In groups of 4 */
while (step >= n)
{
i >>= 4;
step >>= 4;
}
/* At this point, i is the smallest power of two larger or equal to n */
while (i > 0)
{
if (n & i)
printf("1");
else
printf("0");
i >>= 1;
}
}
int main(int argc, char *argv[])
{
int i;
for (i = 0; i < 32; ++i)
{
printf("%d = ", i);
//printbitssimple(i);
printbits(i);
printf("\n");
}
return 0;
}
//This is what i did when our teacher asked us to do this
int main (int argc, char *argv[]) {
int number, i, size, mask; // our input, the counter, sizeof(int), our mask
size = sizeof(int);
mask = 1<<(size*8-1);
printf("Enter integer: ");
scanf("%d", &number);
printf("Integer is :\t%d 0x%X\n", number, number);
printf("Bin format :\t");
for(i=0 ; i<size*8 ;++i ) {
if ((i % 4 == 0) && (i != 0)) {
printf(" ");
}
printf("%u",number&mask ? 1 : 0);
number = number<<1;
}
printf("\n");
return (0);
}
The simplest way for me to do this (for an 8-bit representation):
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
char *intToBinary(int z, int bit_length){
    int div;
    int counter = 0;
    int counter_length = (int)pow(2, bit_length);
    char *bin_str = calloc(bit_length + 1, sizeof(char)); /* +1: sprintf writes a '\0' after the last digit */
    for (int i = counter_length; i > 1; i = i / 2, counter++) {
        div = z % i;
        div = div / (i / 2);
        sprintf(&bin_str[counter], "%i", div);
    }
    return bin_str;
}
int main(int argc, const char * argv[]) {
    for (int i = 0; i < 256; i++) {
        char *bin = intToBinary(i, 8); /* 8 bit, but you could do 16 bit as well */
        printf("%s\n", bin);
        free(bin); /* the caller owns the allocated string */
    }
    return 0;
}
Here is another solution that does not require a char *.
#include <stdio.h>
#include <stdlib.h>
void print_int(int i)
{
int j = -1;
while (++j < 32)
putchar(i & (1 << j) ? '1' : '0');
putchar('\n');
}
int main(void)
{
int i = -1;
while (i < 6)
print_int(i++);
return (0);
}
Or here for more readability:
#define GRN "\x1B[32;1m"
#define NRM "\x1B[0m"
void print_int(int i)
{
int j = -1;
while (++j < 32)
{
if (i & (1 << j))
printf(GRN "1");
else
printf(NRM "0");
}
putchar('\n');
}
And here is the output:
11111111111111111111111111111111
00000000000000000000000000000000
10000000000000000000000000000000
01000000000000000000000000000000
11000000000000000000000000000000
00100000000000000000000000000000
10100000000000000000000000000000
#include <stdio.h>
#define BITS_SIZE 8
void
int2Bin ( int a )
{
int i = BITS_SIZE - 1;
/*
* Tests each bit and prints; starts with
* the MSB
*/
for ( ; i >= 0; i-- )
{
( a & 1 << i ) ? printf ( "1" ) : printf ( "0" );
}
return;
}
int
main ()
{
int d = 5;
printf ( "Decinal: %d\n", d );
printf ( "Binary: " );
int2Bin ( d );
printf ( "\n" );
return 0;
}
Not so elegant, but accomplishes your goal and it is very easy to understand:
#include<stdio.h>
void binario(int x, int bits)
{
    int matriz[bits];
    int resto = 0, rest = 0;
    for (int i = 0; i < bits; i++)
    {
        resto = x / 2;
        rest = x % 2;
        x = resto;
        if (rest > 0)
        {
            matriz[i] = 1;
        }
        else matriz[i] = 0;
    }
    for (int j = bits - 1; j >= 0; j--)
    {
        printf("%d", matriz[j]);
    }
    printf("\n");
}
int main()
{
    int bits = 8;
    for (int i = 0; i < 256; i++)
    {
        binario(i, bits);
    }
    return 0;
}
#include <stdio.h>
int main(void) {
    int a, i;
    unsigned k = 1;
    int arr[32];                 // an array of size 32
    for (i = 0; i < 32; i++)
    {
        arr[i] = 0;              // initialise array elements to zero
    }
    printf("enter a number\n");
    scanf("%d", &a);             // get input from the user
    for (i = 0; i < 32; i++)
    {
        if (a & k)               // bitwise AND operation
        {
            arr[i] = 1;
        }
        else
        {
            arr[i] = 0;
        }
        k = k << 1;              // left shift by one place every time
    }
    for (i = 31; i >= 0; i--)
    {
        printf("%d", arr[i]);    // print the array in reverse
    }
    return 0;
}
void print_binary(int n) {
    /* prints n in binary, most significant bit first; assumes n >= 0 */
    if (n == 0 || n == 1)
        printf("%d", n);
    else {
        print_binary(n >> 1);
        printf("%d", n & 0x1);
    }
}
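A minimal usage sketch (assuming the printf-based C version above; as written, the function expects a non-negative argument):
#include <stdio.h>
int main(void) {
    print_binary(10); /* prints 1010 */
    putchar('\n');
    return 0;
}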