I have this code to get from binary to decimal:
#include <stdio.h>
#include <math.h>
#include <stdint.h>

long long binaryToDecimal(long long binary);
long long binaryToDecimalHelper(long long binary, int power);

int main() {
    printf("%lld\n", binaryToDecimal(11110000111100001111000011110000));
    return 1;
}

long long binaryToDecimal(long long binary) {
    int power = 0;
    return binaryToDecimalHelper(binary, power);
}

long long binaryToDecimalHelper(long long binary, int power) {
    if (binary != 0) {
        long long i = binary % 10;
        return (i * pow(2, power))
            + binaryToDecimalHelper(binary / 10, power + 1);
    } else {
        return 0;
    }
}
It works fine for small values (up to 16 bits), but for 32 bits (which is what I need) it just returns garbage.
The number 11110000111100001111000011110000 is of type int, which can't hold a number as big as 11110000111100001111000011110000 on your machine. It's better to use a string representation instead ("11110000111100001111000011110000") and adjust your algorithm, if you can.
If you are limited to 32 bits maximum, this is one example:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void setStr(char *c, const char *x)
{
    int i = 0;
    while (x[i] != '\0')
    {
        c[i] = x[i];
        i++;
    }
    c[i] = '\0'; // terminate the copy, otherwise strlen(c) below is undefined
}

void prepend(char *s, const char *t)
{
    size_t len = strlen(t);
    size_t i;
    memmove(s + len, s, strlen(s) + 1);
    for (i = 0; i < len; ++i)
    {
        s[i] = t[i];
    }
}

int main(int argc, char const *argv[])
{
    char *x = malloc(33 * sizeof(char));
    setStr(x, "111");
    while (strlen(x) < 31) // not really necessary, but will help to 'cut' bytes if necessary
    {
        prepend(x, "0");
    }
    printf("%s\n", x);
    int i = strtol(x, NULL, 2);
    printf("%d\n", i);
    free(x);
    return 0;
}
The first thing to be clear on is that your code does not convert anything to decimal, but rather to an int. Conversion to a decimal character string representation of that integer is performed by the printf() call.
The literal constant 11110000111100001111000011110000 is interpreted by the compiler (or would be if it were not so large) as a decimal value, and as such will require 104 bits to store;
i.e. ceil( log10(11110000111100001111000011110000) / log10(2) ) = 104
Representing a binary value with a decimal integer containing only 1 and 0 digits does not make much mathematical sense - though it may be convenient for small integers. A 64 bit unsigned long long is good for only 20 decimal digits (using just 1 and 0 - it can represent all 19 digit positive decimal integers, and some 20 digit values);
i.e. log10(2) * 64 ≈ 19.3
If you need longer binary values, then you should probably use a string representation. This is in fact simpler and more efficient in any case - you can use the fact that the machine representation of integers already is binary:
#include <stdio.h>
#include <stdint.h>

uint64_t binstrToInt( const char* binstr )
{
    uint64_t result = 0 ;
    int bit = 0;

    while( binstr[bit] != '\0' )
    {
        if( binstr[bit] == '1' )
        {
            result |= 1 ;
        }
        bit++ ;
        if( binstr[bit] != '\0' )
        {
            result <<= 1 ;
        }
    }

    return result ;
}

int main()
{
    printf("%llu\n", binstrToInt( "11110000111100001111000011110000" ) ) ;
    return 0 ;
}
This would be easiest by far with a string as input instead of an int, and would allow longer numbers. Your problem is probably being caused by integer overflow.
String version:
#include <math.h>
#include <stdio.h>
#include <string.h>
int main() {
    const char * const string_to_convert = "1010";
    int result = 0;
    size_t len = strlen( string_to_convert );
    for( int i = (int)len - 1; i >= 0; --i ) {
        if( string_to_convert[i] == '1' ) {
            // Careful with pow() -- returns double, may round incorrectly
            result += (int)pow( 2.0, (double)( len - 1 - i ) );
        }
    }
    fprintf( stdout, "%d", result );
    return 0;
}
Also, I'm not sure what the point of the return 1 is. Usually a non-zero return value from main indicates an error.
I made this program that passes a binary number into a function and prints the decimal value of that binary number. The problem is that if the binary number gets big (like 11 digits), the function prints something completely different. I have tried to solve this for a couple of hours now but nothing worked.
So my question is: how can I change my program so that it prints the right decimal number even when the binary number gets big?
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdint.h>

int int_log2_64(uint64_t x) { return 63 ^ __builtin_clzll(x); }

#define K(T) (((sizeof(#T) - 1) << 32) - T)

int digit_count(uint32_t x)
{
    static uint64_t table[] = {
        K(0), K(0), K(0),
        K(10), K(10), K(10),                          // 64
        K(100), K(100), K(100),                       // 512
        K(1000), K(1000), K(1000),                    // 4096
        K(10000), K(10000), K(10000),                 // 32k
        K(100000), K(100000), K(100000),              // 256k
        K(1000000), K(1000000), K(1000000),           // 2048k
        K(10000000), K(10000000), K(10000000),        // 16M
        K(100000000), K(100000000), K(100000000),     // 128M
        K(1000000000), K(1000000000), K(1000000000),  // 1024M
        K(1000000000), K(1000000000)                  // 4B
    };
    int lg2 = int_log2_64(x);
    uint64_t n = (uint64_t)(x) + table[lg2];
    return n >> 32;
}

void binaryToDecimal(long long int bin)
{
    int l = digit_count(bin);
    char str[l];
    itoa(bin, str, 10);
    float sum[l];
    int x = l - 1;
    float answer;
    for (int i = 0; i < l; i++)
    {
        if (str[i] == '1')
        {
            sum[i] = pow(2, x);
        }
        x--;
    }
    for (int i = 0; i < l; i++)
    {
        answer = answer + sum[i];
    }
    printf("%.0f", answer);
}

int main()
{
    long long int bin = 10101101101;
    binaryToDecimal(bin);
}
P.S. I changed the code to this and it works:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

void binaryToDecimal(char *bin)
{
    int l = strlen(bin);
    int x = l - 1;
    float sum[l];
    float answer = 0;
    for (int i = 0; i < l; i++)
    {
        if (bin[i] == '1')
            answer += pow(2, x);
        else
            sum[i] = 0;
        x--;
    }
    printf("%.0f", answer);
}

int main()
{
    binaryToDecimal("010101101101");
}
How can I change my program so that it prints the right decimal number even when the binary number gets big?
By making your bin variable a string.
That is, you want
void binaryToDecimal(const char *str)
{
    ...
}
and then you can call things like
binaryToDecimal("101111000110000101001110");
When doing base conversions, I believe it is always a mistake to use an integer variable for the input. If I say
int x = 12;
it is not true that "x is a decimal integer". x is an integer, period — I just happened to use a decimal constant to get a value into it. Or if I say
int y = 0x7b;
then it's not meaningful to say that y is hexadecimal — again, y is just an integer.
The base in which an integer is represented only matters:
on input, when we read a number from the user using scanf with the %d, %o, or %x formats
when converting a string with the standard library atoi or strtol functions
on output, when we print a number using printf with the %d, %o, or %x formats
But in all of those cases, the representation where the base matters is a string of digit characters, not an integer.
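To make that concrete, here is a minimal sketch (the literal "1010" and the output formats are only for illustration):
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* base matters while parsing the string "1010" ... */
    int x = (int)strtol("1010", NULL, 2);

    /* ... and while printing; in between, x is just an integer */
    printf("%d %o %x\n", x, x, x);  /* prints: 10 12 a */
    return 0;
}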
There are two reasons not to write a "binary to ..." function that accepts an integer. One is that, as you've seen, it artificially and unnecessarily limits the range of the numbers you can convert. But the even bigger reason is that it confuses the heck out of your readers, because it's just wrong. If I see a function call
f(1001)
I think to myself, "Okay, the constant value one thousand and one is being passed to function f." There is no circumstance under which I would imagine that it was actually trying to pass the binary number nine.
Your code is extremely overcomplicated. Never use floats to do integer calculations.
unsigned binaryToDecimal(long long int bin)
{
    unsigned answer = 0;
    int shift = 0;
    while(bin)
    {
        answer += (bin % 10) << shift++;
        bin /= 10;
    }
    return answer;
}

int main()
{
    long long int bin = 10101101101;
    printf("%u\n", binaryToDecimal(bin));
}
https://godbolt.org/z/5E8Gv6oqf
Or use a string to pass the binary number:
unsigned binaryToDecimal(char *str)
{
    unsigned answer = 0;
    while(*str)
    {
        answer <<= 1;
        answer += *str++ == '1';
    }
    return answer;
}

int main()
{
    printf("%u\n", binaryToDecimal("10101101101"));
}
https://godbolt.org/z/4vfnETY1f
Right now I am trying to convert an int to a char in C programming. After doing research, I found that I should be able to do it like this:
int value = 10;
char result = (char) value;
What I would like is for this to return 'A' (and for 0-9 to return '0'-'9') but this returns a new line character I think.
My whole function looks like this:
char int2char (int radix, int value) {
    if (value < 0 || value >= radix) {
        return '?';
    }
    char result = (char) value;
    return result;
}
To convert an int to a char you do not have to do anything:
char x;
int y;
/* do something */
x = y;
To convert one int value to the printable (usually ASCII) digit char, as in your example:
const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
int inttochar(int val, int base)
{
    return digits[val % base];
}
If you want to convert to a string (char *) then you need to use any of the standard functions like sprintf, itoa, ltoa, utoa, ultoa ... or write one yourself:
char *reverse(char *str);
const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
char *convert(int number, char *buff, int base)
{
    char *result = (buff == NULL || base > strlen(digits) || base < 2) ? NULL : buff;
    char sign = 0;

    if (number < 0)
    {
        sign = '-';
    }
    if (result != NULL)
    {
        do
        {
            *buff++ = digits[abs(number % base)];
            number /= base;
        } while (number);
        if (sign) *buff++ = sign;
        if (!*result) *buff++ = '0';
        *buff = 0;
        reverse(result);
    }
    return result;
}
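The reverse() helper is only declared above; a minimal in-place version (my sketch, not necessarily what the author had in mind) could look like:
#include <string.h>

char *reverse(char *str)
{
    size_t len = strlen(str);
    size_t i;

    for (i = 0; i < len / 2; i++)
    {
        /* swap the ends, walking inward */
        char tmp = str[i];
        str[i] = str[len - 1 - i];
        str[len - 1 - i] = tmp;
    }
    return str;
}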
A portable way of doing this would be to define a
const char* foo = "0123456789ABC...";
where ... are the rest of the characters that you want to consider.
Then foo[value] will evaluate to a particular char. For example foo[0] will be '0', and foo[10] will be 'A'.
If you assume a particular encoding (such as the common but by no means ubiquitous ASCII) then your code is not strictly portable.
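A minimal sketch of that idea (the table contents and indexes here are only one possible choice):
#include <stdio.h>

int main(void)
{
    /* index into the table instead of assuming an encoding */
    const char *foo = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";

    printf("%c %c %c\n", foo[0], foo[10], foo[35]); /* prints: 0 A Z */
    return 0;
}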
Characters use an encoding (typically ASCII) to map numbers to a particular character. The codes for the characters '0' to '9' are consecutive, so for values less than 10 you add the value to the character constant '0'. For values 10 or more, you add the value minus 10 to the character constant 'A':
char result;
if (value >= 10) {
    result = 'A' + value - 10;
} else {
    result = '0' + value;
}
Converting Int to Char
I take it that OP wants more than just a 1 digit conversion, as a radix was supplied.
To convert an int into a string, (not just 1 char) there is the sprintf(buf, "%d", value) approach.
To do so to any radix, string management becomes an issue, as well as dealing with the corner case of INT_MIN.
The following C99 solution returns a char* whose lifetime is valid to the end of the block. It does so by providing a compound literal via the macro.
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
// Maximum buffer size needed
#define ITOA_BASE_N (sizeof(unsigned)*CHAR_BIT + 2)
char *itoa_base(char *s, int x, int base) {
    s += ITOA_BASE_N - 1;
    *s = '\0';
    if (base >= 2 && base <= 36) {
        int x0 = x;
        do {
            *(--s) = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"[abs(x % base)];
            x /= base;
        } while (x);
        if (x0 < 0) {
            *(--s) = '-';
        }
    }
    return s;
}
#define TO_BASE(x,b) itoa_base((char [ITOA_BASE_N]){0} , (x), (b))
Sample usage and tests
void test(int x) {
    printf("base10:% 11d base2:%35s base36:%7s ", x, TO_BASE(x, 2), TO_BASE(x, 36));
    printf("%ld\n", strtol(TO_BASE(x, 36), NULL, 36));
}

int main(void) {
    test(0);
    test(-1);
    test(42);
    test(INT_MAX);
    test(-INT_MAX);
    test(INT_MIN);
}
Output
base10: 0 base2: 0 base36: 0 0
base10: -1 base2: -1 base36: -1 -1
base10: 42 base2: 101010 base36: 16 42
base10: 2147483647 base2: 1111111111111111111111111111111 base36: ZIK0ZJ 2147483647
base10:-2147483647 base2: -1111111111111111111111111111111 base36:-ZIK0ZJ -2147483647
base10:-2147483648 base2: -10000000000000000000000000000000 base36:-ZIK0ZK -2147483648
Ref How to use compound literals to fprintf() multiple formatted numbers with arbitrary bases?
Check out the ASCII table.
The values stored in a char are interpreted as the characters corresponding to that table. The value 10 is a newline.
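A small demonstration of that point (a sketch; the values are arbitrary):
#include <stdio.h>

int main(void)
{
    char c = 10;
    putchar(c);        /* emits a newline, not the text "10" */
    putchar('0' + 9);  /* emits the digit 9 */
    return 0;
}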
So characters in C are based on ASCII (or UTF-8, which is backwards-compatible with ASCII codes). This means that under the hood, 'A' is actually the number 65 (except stored in binary rather than decimal). All a char is in C is an integer with enough bytes to represent every ASCII character. If you want to convert an int to a char, you'll need to instruct the computer to interpret the bytes of an int as ASCII values - and it's been a while since I've done C, but I believe the compiler will complain since char holds fewer bytes than int. This means we need a function, as you've written. Thus,
if(value < 10) return '0'+value;
return 'A'+value-10;
will be what you want to return from your function. Keep your bounds checks with radix as you've done; imho that is good practice in C.
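Put together with the bounds check from the question, the whole function might look like this sketch:
char int2char(int radix, int value)
{
    if (value < 0 || value >= radix) {
        return '?';              /* out of range for this radix */
    }
    if (value < 10) {
        return '0' + value;      /* 0..9  -> '0'..'9' */
    }
    return 'A' + value - 10;     /* 10... -> 'A', 'B', ... */
}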
1. Converting int to char by type casting
Source File charConvertByCasting.c
#include <stdio.h>
int main(){
int i = 66; // ~~Type Casting Syntax~~
printf("%c", (char) i); // (type_name) expression
return 0;
}
Executable charConvertByCasting.exe command line output:
C:\Users\boqsc\Desktop\tcc>tcc -run charconvert.c
B
Additional resources:
https://www.tutorialspoint.com/cprogramming/c_type_casting.htm
https://www.tutorialspoint.com/cprogramming/c_data_types.htm
2. Convert int to char by assignment
Source File charConvertByAssignment.c
#include <stdio.h>
int main(){
int i = 66;
char c = i;
printf("%c", c);
return 0;
}
Executable charConvertByAssignment.exe command line output:
C:\Users\boqsc\Desktop\tcc>tcc -run charconvert.c
B
You can do
char a;
a = '0' + 5;
You will get the character representation of that number.
Borrowing the idea from the existing answers, i.e. making use of an array index.
Here is a "just works" simple demo for "integer to char[]" conversion in base 10, without any of <stdio.h>'s printf family interfaces.
Test:
$ cc -o testint2str testint2str.c && ./testint2str
Result: 234789
Code:
#include <stdio.h>
#include <string.h>

static char digits[] = "0123456789";

void int2str (char *buf, size_t sz, int num);

/*
Test:
    cc -o testint2str testint2str.c && ./testint2str
*/
int
main ()
{
    int num = 234789;
    char buf[1024] = { 0 };
    int2str (buf, sizeof buf, num);
    printf ("Result: %s\n", buf);
}

void
int2str (char *buf, size_t sz, int num)
{
    /*
    Convert integer type to char*, in base-10 form.
    */
    char *bufp = buf;
    int i = 0;

    // NOTE-1
    void __reverse (char *__buf, int __start, int __end)
    {
        char __bufclone[__end - __start];
        int i = 0;
        int __nchars = sizeof __bufclone;
        for (i = 0; i < __nchars; i++)
        {
            __bufclone[i] = __buf[__end - 1 - i];
        }
        memmove (__buf, __bufclone, __nchars);
    }

    while (num > 0)
    {
        bufp[i++] = digits[num % 10]; // NOTE-2
        num /= 10;
    }
    __reverse (buf, 0, i);
    // NOTE-3
    bufp[i] = '\0';
}

// NOTE-1:
//    "Nested function" is a GNU C extension. Put it outside if not
//    compiling with GCC.
// NOTE-2:
//    10 can be replaced by any radix, like 16 for hexadecimal output.
// NOTE-3:
//    Make sure to append the trailing null terminator after everything
//    else is done.
I have to write a C program for one of my classes that converts a given binary number to decimal. My program works for smaller inputs, but not for larger ones. I believe this may be due to the conversion specifier I am using for scanf(), but I am not positive. My code is below:
#include <stdio.h>
#include <math.h>

int main(void)
{
    unsigned long inputNum = 0;
    int currentBinary = 0;
    int count = 0;
    float decimalNumber = 0;

    printf( "Input a binary number: " );
    scanf( "%lu", &inputNum );

    while (inputNum != 0)
    {
        currentBinary = inputNum % 10;
        inputNum = inputNum / 10;
        printf("%d\t%d\n", currentBinary, inputNum);
        decimalNumber += currentBinary * pow(2, count);
        ++count;
    }

    printf("Decimal conversion: %.0f", decimalNumber);
    return 0;
}
Running with a small binary number:
Input a binary number: 1011
1 101
1 10
0 1
1 0
Decimal conversion: 11
Running with a larger binary number:
Input a binary number: 1000100011111000
2 399133551
1 39913355
5 3991335
5 399133
3 39913
3 3991
1 399
9 39
9 3
3 0
Decimal conversion: 5264
"1000100011111000" is a 20 digit number. Certainly unsigned long is too small on your platform.
unsigned long is good - up to at least 10 digits.1
unsigned long long is better - up to at least 20 digits.1
To get past that:
Below is an any-size conversion that reads 1 char at a time and forms an unbounded decimal string.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

// Double the decimal form of string: "512" --> "1024"
char *sdouble(char *s, size_t *len, int carry) {
    size_t i = *len;
    while (i > 0) {
        i--;
        int sum = (s[i] - '0')*2 + carry;
        s[i] = sum%10 + '0';
        carry = sum/10;
    }
    if (carry) {
        (*len)++;
        s = realloc(s, *len + 1); // TBD OOM check
        memmove(&s[1], s, *len);
        s[0] = carry + '0';
    }
    return s;
}

int main(void) {
    int ch;
    size_t len = 1;
    char *s = malloc(len + 1); // TBD OOM check
    strcpy(s, "0");
    while ((ch = fgetc(stdin)) >= '0' && ch <= '1') {
        s = sdouble(s, &len, ch - '0');
    }
    puts(s);
    free(s);
    return 0;
}
100 digits
1111111111000000000011111111110000000000111111111100000000001111111111000000000011111111110000000000
1266413867935323811836706421760
[1] When the lead digit is 0 or 1.
When you do this for a large number inputNum
currentBinary = inputNum % 10;
its top portion gets "sliced off" on conversion to int. If you would like to stay within the bounds of an unsigned long, switch currentBinary to unsigned long as well, and use an unsigned long format specifier in printf. Moreover, unsigned long may not be sufficiently large on many platforms, so you need to use unsigned long long.
Demo.
Better yet, switch to reading the input into a string, validating it to be zeros and ones (you have to do that anyway), and do the conversion in a cleaner character-by-character way. This would let you go beyond the 64-bit limit of 19 binary digits and handle a full-scale int input.
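Here is a minimal sketch of that suggestion (my code, assuming input of at most 64 binary digits; the names are illustrative):
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    char buf[65];
    if (scanf("%64s", buf) != 1)
        return EXIT_FAILURE;

    unsigned long long value = 0;
    for (const char *p = buf; *p; ++p) {
        if (*p != '0' && *p != '1') { // validate as we go
            fprintf(stderr, "not a binary digit: %c\n", *p);
            return EXIT_FAILURE;
        }
        value = value * 2 + (unsigned)(*p - '0'); // accumulate bit by bit
    }
    printf("Decimal conversion: %llu\n", value);
    return 0;
}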
On your platform, unsigned long supports a maximum value of 4294967295, which means that in the process of scanf( "%lu", &inputNum ); the decimal number 1000100011111000 got sliced down to what fits in a 32-bit unsigned long.
I think reading inputNum into a string would help a lot. In the while loop condition, check whether the string is empty; in the loop body, get the last char of the string, detect whether it's a '1' or a '0', and then calculate the decimal number from that.
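A sketch of that exact loop (my code; the 64-digit buffer size is an assumption):
#include <stdio.h>
#include <string.h>

int main(void)
{
    char s[65];
    if (scanf("%64s", s) != 1)
        return 1;

    unsigned long long dec = 0, weight = 1;
    size_t len = strlen(s);
    while (len != 0) {        /* "while the string is not empty" */
        char last = s[--len]; /* take the last char ...          */
        s[len] = '\0';        /* ... and drop it from the string */
        if (last == '1') dec += weight;
        weight *= 2;
    }
    printf("%llu\n", dec);
    return 0;
}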
I was tasked with writing a binary to decimal converter that takes larger binary inputs, using embedded C programming in which we are not allowed to use library functions such as strlen. I found a simpler way to write this conversion tool in C, with both strlen and also sizeof, as shown in the code below. Hope this helps. As you can see, strlen is commented out, but either approach works fine. Sizeof just accounts for the terminating 0 element in the array, and that is why sizeof(number) - 1 is used. Cheers!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

const char number[] = "100111111111111111111111";

int binToDec(const char *);

int main()
{
    printf("Output: %d", binToDec(number));
}

int binToDec(const char *n)
{
    const char *num = n;
    int decimal_value = 0;
    int base = 1;
    int i;
    int len = sizeof(number) - 1;
    //int len = strlen(number);

    for (i = len - 1; i >= 0; i--)
    {
        if (num[i] == '1')
            decimal_value += base;
        base = base * 2;
    }
    return decimal_value;
}
I am trying to create a script that will convert decimals to binary based on a specified size and then reverse the process, meaning from binary back to decimal. So far the script and its output look correct from my (beginner's) point of view. I can convert all numbers from decimal to binary and vice versa. I am stuck on the last part, where I am trying to convert the epoch time from a 64 bit binary number back to decimal. I cannot understand where I am going wrong, since the rest of the numbers seem to be recovered correctly. The sources where I found the scripts I am using are Binary to Decimal and Decimal to Binary.
Update: modified the code to a short version:
I have modified the code to simply demonstrate the problem. The code works fine up to 32 bit binary conversion, but since I need to go up to 64 bits I do not know how to do that. I noticed that I had hit the 32-bit limit because I was using int, so I changed it to long long int to reach 64 bits.
I have provided a sample that simply converts the decimal 1 in 32 bit and 64 bit format, which demonstrates the problem. The epoch time is the desired output, but I need to verify that the code works before I attempt that conversion.
Sample of code:
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <inttypes.h>

#define MAX_CHARACTERS 65

typedef struct rec {
    char transmit[MAX_CHARACTERS];
    char receive[MAX_CHARACTERS];
} RECORD;

char *decimal_to_binary(int n, int num); /* Define function */
char *decimal_to_binary(int n, int num) {
    long long int c, d, count;
    char *pointer;

    count = 0;
    pointer = (char*) malloc( num + 1 );
    if ( pointer == NULL )
        exit(EXIT_FAILURE);

    for ( c = num - 1; c >= 0; c-- ) {
        d = n >> c;
        if ( d & 1 )
            *( pointer + count ) = 1 + '0';
        else
            *( pointer + count ) = 0 + '0';
        count++;
    }
    *( pointer + count ) = '\0';
    return pointer;
}

int binary_decimal(long long int n); /* Define function */
int binary_decimal(long long int n) { /* Function to convert binary to decimal.*/
    int decimal = 0, i = 0, rem;
    while (n != 0) {
        rem = n % 10;
        n /= 10;
        decimal += rem * pow(2, i);
        ++i;
    }
    return decimal;
}

int main(void) {
    RECORD *ptr_record;
    ptr_record = (RECORD *) malloc (sizeof(RECORD));
    if (ptr_record == NULL) {
        printf("Out of memmory!\nExit!\n");
        exit(0);
    }

    int LI_d = 1;
    char *LI_b = decimal_to_binary(LI_d, 32);
    memset( (*ptr_record).transmit, '\0', sizeof((*ptr_record).transmit) );
    strncat( (*ptr_record).transmit, LI_b, strlen(LI_b) );
    printf("LI: %s\n", (*ptr_record).transmit);

    //transmit and receive
    memset( (*ptr_record).receive, '\0', sizeof((*ptr_record).receive) );
    strncpy( (*ptr_record).receive, (*ptr_record).transmit, strlen((*ptr_record).transmit) );

    char *LI_rcv_b = strndup( (*ptr_record).receive, 64 );
    int LI_rcv_i = atoi(LI_rcv_b);
    int final_LI = binary_decimal(LI_rcv_i);
    printf("Final_LI: %i\n", final_LI);

    free( ptr_record );
    return 0;
}
Sample of output for 32 bit conversion:
LI: 00000000000000000000000000000001
Final_LI: 1
Sample of output for 64 bit conversion:
LI: 0000000000000000000000000000000100000000000000000000000000000001
Final_LI: -1
decimal_to_binary(int n, ...): better to use unsigned math
//char *decimal_to_binary(int n, int num) {
char *decimal_to_binary(unsigned long long n, int num) {
    // long long int c, d, count;
    unsigned long long int d;
    int c, count;
    char *pointer;

    count = 0;
    pointer = malloc(num + 1); // drop cast
    if (pointer == NULL)
        exit(EXIT_FAILURE);

    for (c = num - 1; c >= 0; c--) {
        d = n >> c;
        if (d & 1)
            *(pointer + count) = 1 + '0';
        else
            *(pointer + count) = 0 + '0';
        count++;
    }
    *(pointer + count) = '\0';
    return pointer;
}
Simplify binary_decimal(). Again use unsigned math, drop pow()
/* Function to convert binary to decimal.*/
unsigned long binary_decimal(unsigned long long int n) {
    unsigned long decimal = 0;
    while (n != 0) {
        decimal *= 2;
        decimal += n % 10;
        n /= 10;
    }
    return decimal;
}
main() has lots of issues
int main(void) {
    RECORD *ptr_record;
    ptr_record = malloc(sizeof(RECORD)); // drop cast
    if (ptr_record == NULL) {
        printf("Out of memory!\nExit!\n"); // spelling fix
        exit(0);
    }

    // use unsigned long long
    unsigned long long LI_d = 1;
    LI_d = (unsigned long long) -1;
    char *LI_b = decimal_to_binary(LI_d, 32);
    memset((*ptr_record).transmit, '\0', sizeof((*ptr_record).transmit));
    // strncat((*ptr_record).transmit, LI_b, strlen(LI_b));
    strncat((*ptr_record).transmit, LI_b, sizeof((*ptr_record).transmit) - 1);
    printf("LI: %s\n", (*ptr_record).transmit);

    //transmit and receive
    memset((*ptr_record).receive, '\0', sizeof((*ptr_record).receive));
    // strncpy((*ptr_record).receive, (*ptr_record).transmit, strlen((*ptr_record).transmit));
    strncpy((*ptr_record).receive, (*ptr_record).transmit, sizeof((*ptr_record).transmit) - 1);

    // char *LI_rcv_b = strndup((*ptr_record).receive, 64);
    char *LI_rcv_b = strndup((*ptr_record).receive, MAX_CHARACTERS);

    // At this point, approach is in error
    // Cannot take a 64-decimal digit string and convert to a typical long long.
    // int LI_rcv_i = atoi(LI_rcv_b);
    // int final_LI = binary_decimal(LI_rcv_i);
    // printf("Final_LI: %i\n", final_LI);

    // Suspect you want to convert 64-binary digit string to a 64-bit integer
    // maybe by somehow using binary_decimal - suggest re-write of that function
    unsigned long long LI_rcv_i = strtoull(LI_rcv_b, NULL, 2);
    printf("Final_LI: %llu\n", LI_rcv_i);

    free(ptr_record);
    return 0;
}
Output
LI: 11111111111111111111111111111111
Final_LI: 4294967295
I have a simple program to convert binary numbers to decimal. In my compiler, the decomposition works just fine for numbers less than 1000, but beyond that the output is always the same: 1023. Does anybody have an idea?
#include <stdio.h>
#include <stdlib.h>

// how many powers of ten there are in a number
// (I don't use the pow() function to avoid trouble with floating numbers)
int residu(int N)
{
    int i = 0;
    while (N >= 1) {
        N = N / 10;
        i++;
    }
    return i;
}

// exponentiating a number a by a number b
int power(int a, int b) {
    int i;
    int res = 1;
    for (i = 0; i < b; i++) { res = a * res; }
    return res;
}

// converting a number N
int main()
{
    int i;
    // the number to convert
    int N;
    scanf("%d", &N);

    // the final decimal result
    int res = 0;

    // we decompose N by descending powers of 10, and M is the rest
    int M = 0;
    for (i = 0; i < residu(N); i++) {
        // simple loop to look if there is a power of (residu(N)-1-i) in N,
        // if yes we increment the binary decomposition by
        // power(2, residu(N)-1-i)
        if (M + power(10, residu(N)-1-i) <= N)
        {
            M = M + power(10, residu(N)-1-i);
            res = power(2, residu(N)-1-i) + res;
        }
    }
    printf("%d\n", res);
}
Yes, try this:
#include <stdio.h>

int main(void)
{
    char bin = 0; /* must be initialized: it is tested before the first scanf */
    int dec = 0;

    while (bin != '\n') {
        scanf("%c", &bin);
        if (bin == '1') dec = dec * 2 + 1;
        else if (bin == '0') dec *= 2;
    }
    printf("%d\n", dec);
    return 0;
}
Most likely this is because you are using an int to store your binary number. An int will not store numbers above 2^31 - 1, which is 10 digits long, and 1023 = 2^10 - 1 is the largest number you can get with 10 binary digits.
It would be much easier for you to read your input number as a string, and then process each character of the string.
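One way to act on that advice, as a sketch (using the standard strtoull with base 2, and assuming the value fits in 64 bits):
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char line[128];
    if (fgets(line, sizeof line, stdin) == NULL)
        return EXIT_FAILURE;

    // strtoull does the per-character work; it stops at the first
    // character that is not a valid digit in base 2 (e.g. the '\n')
    unsigned long long value = strtoull(line, NULL, 2);
    printf("%llu\n", value);
    return 0;
}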
After a little experimentation, I think that your program is intended to accept a number consisting of 1's and 0's only as a base-10 number (the %d reads a decimal number). For example, given input 10, it outputs 2; given 1010, it outputs 10; given 10111001, it outputs 185.
So far, so good. Unfortunately, given 1234, it outputs 15, which is a little unexpected.
If you are running on a machine where int is a 32-bit signed value, then you can't enter a number with more than 10 digits, because you overflow the limit of a 32-bit int (which can handle ±2 billion, in round terms). The scanf() function doesn't handle overflows well.
You could help yourself by echoing your inputs; this is a standard debugging technique. Make sure the computer got the value you are expecting.
I'm not going to attempt to fix the code because I think you're going about the problem in completely the wrong way. (I'm not even sure whether it's best described as binary to decimal, or decimal to binary, or decimal to binary to decimal!) You would do better to read the input as a string of (up to 31) characters, then validate that each one is either a 0 or a 1. Assuming that's correct, then you can process the string very straight-forwardly to generate a value which can be formatted by printf() as a decimal.
Shift left is the same as multiplying by 2 and is more efficient, so I think it is a more C-like answer:
#include <stdio.h>
#include <stdlib.h>

int bin2int(const char *bin)
{
    int i = 0, j; /* i must start at 0 before bits are shifted in */

    j = sizeof(int) * 8;
    while ( (j--) && ((*bin == '0') || (*bin == '1')) ) {
        i <<= 1;
        if ( *bin == '1' ) i++;
        bin++;
    }
    return i;
}

int main(void)
{
    char *input = NULL;
    size_t size = 0;

    while ( getline(&input, &size, stdin) > 0 ) {
        printf("%i\n", bin2int(input));
    }
    free(input);
}
#include <stdio.h>  //printf
#include <string.h> //strlen
#include <stdint.h> //uintX_t or use int instead - depends on platform

/* reverse string */
char *strrev(char *str){
    int end = strlen(str) - 1;
    int start = 0;

    while( start < end ){
        str[start] ^= str[end];
        str[end]   ^= str[start];
        str[start] ^= str[end];
        ++start;
        --end;
    }
    return str;
}

/* transform binary string to integer */
uint32_t binstr2int(char *bs){
    uint32_t ret = 0;
    uint32_t val = 1;

    while(*bs){
        if (*bs++ == '1') ret = ret + val;
        val = val * 2;
    }
    return ret;
}

int main(void){
    char binstr[] = "1010101001010101110100010011111"; //1428875423
    printf("Binary: %s, Int: %d\n", binstr, binstr2int(strrev(binstr)));
    return 0;
}