How to convert Huge decimal value binary - c

I have a simple program in which program receive input from the file and converts decimal to binary and counts number of ones in its binary form?
Now For small values it is okay
and for huge values like 15755645551 , it's obviously not working
anyone has any idea how to resolve this problem?
anyone can try with my code?
thank you!!
Here is my code:
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>
#include <string.h>
#define BUFFER 25
long number_read = 0;
long number_of_ones = 0;
long remainder_value = 0;
long binary = 0;
long base = 0;
long buff[BUFFER];
int main()
{
FILE *fp; //file pointer fp
fp = fopen("File.txt", "r+");
while (fgets(buff, sizeof(buff), fp) != NULL)
{
number_read = atoi(buff); //ASCII to integer
printf("\nnumber is=%d", number_read);
while (number_read > 0)
{
remainder_value = number_read % 2;
/* To count no.of 1s */
if (remainder_value == 1)
{
number_of_ones++;
}
binary = binary + remainder_value * base;
number_read = number_read / 2;
base = base * 10;
}
printf("\nNo.of 1's in It's binary representation is = %d\n", number_of_ones);
number_of_ones = 0;
}
fclose(fp);
return 0;
}

Because "long" type occupies 4 bytes as "int" type. You should change it to "long long" type.

Related

32 bit addition of two numbers in C

I have a code which performs the 32 bit addition of two numbers.I am generating 10 random 32 bit numbers and each of them are stored in two separate files as their corresponding hex values.The result is also stored in another file in the same way on the execution of the code i am getting some negative values for certain combinations.
Here is my code.
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#define TARGET_MAX 2147483647L
void main ()
{
int32_t A[10] = {0};
int32_t B[10] = {0};
int32_t S[10] = {0};
int i =0;
int n =10;
char hex[32] ={0};
time_t t;
FILE *fp = NULL;
FILE *fp1 = NULL;
FILE *fp2 = NULL;
fp = fopen("A.txt","w");
fp1 = fopen("B.txt","w");
fp2 = fopen("S.txt","w");
srand((unsigned) time(&t));
for(i=0;i<n;i++)
{
A[i] = rand() % TARGET_MAX;
if(fp != NULL)
{
sprintf(hex,"%x",A[i]);
fprintf(fp,"%s\n",hex);
memset(hex,0,sizeof(hex));
}
B[i] = rand() % TARGET_MAX;
if(fp1 != NULL)
{ sprintf(hex,"%x",B[i]);
fprintf(fp1,"%s\n",hex);
memset(hex,0,sizeof(hex));
}
S[i] = A[i] + B[i];
if(fp2 != NULL)
{ sprintf(hex,"%x",S[i]);
fprintf(fp2,"%s\n",hex);
memset(hex,0,sizeof(hex));
}
printf(" %d + %d = %d \n\r",A[i],B[i],S[i]);
}
fclose(fp);
fclose(fp1);
fclose(fp2);
}
Here is my output
vu2swz#PPDP01:~/vlsi_lab/exp6$ gcc add.c -o add
vu2swz#PPDP01:~/vlsi_lab/exp6$ ./add
1000086044 + 1204997665 = -2089883587
436835310 + 1696128436 = 2132963746
1624838244 + 335108562 = 1959946806
1782944281 + 1013582119 = -1498440896
1491331491 + 1257744454 = -1545891351
1676611730 + 773175875 = -1845179691
422206991 + 2136514004 = -1736246301
1622400103 + 152657712 = 1775057815
809410550 + 1662804335 = -1822752411
1396954314 + 742609108 = 2139563422
You're running into overflow.
In the instances where you're getting a negative number, the result is overflowing what can fix in a signed 32 bit integer.
Since the numbers you're generating are smaller than 231, their sum will be less than 232 which will fit in an unsigned 32 bit integer. So change your types to uint32_t and use the %u format specifier to print them.

How to convert epoch decimal time to 64 bit binary and back to decimal in C

I am trying to create a script that will convert decimals to binary based on specified size and then reverse the process, meaning from binary to decimal. So far the script and the output from my point of view (beginner) the script looks correct. I can convert all numbers from decimal to binary and vice versa. I am stack on the last part, that I am trying to convert the epoch time from a 64 bit binary number to decimal. I can not understand where I am going wrong since the rest of the numbers seem to recovered correctly. The source points that I found the scripts that I am using are Binary to Decimal and Decimal to Binary.
Update: modified code to short version:
I have modified the code to simply demonstrate the problem. The code works fine up to 32 bit binary conversion. But since I need to convert up to 64 I do not know how to do that. I noticed that because I used before int I reached the maximum limitations 32 bits, so I modified that to long long int to reach the 64 bit.
I have provided a sample of simple conversion of decimal as 1 in 32 bit format and 64 that demonstrate the problem. The epoch time is the desired output but I need to verify that the code works before I attempt the conversion.
Sample of code:
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <inttypes.h>
#define MAX_CHARACTERS 65
typedef struct rec {
char transmit[MAX_CHARACTERS];
char receive[MAX_CHARACTERS];
}RECORD;
char *decimal_to_binary(int n , int num); /* Define function */
char *decimal_to_binary(int n , int num) {
long long int c, d, count;
char *pointer;
count = 0;
pointer = (char*) malloc( num + 1 );
if ( pointer == NULL )
exit(EXIT_FAILURE);
for ( c = num - 1; c >= 0; c-- ) {
d = n >> c;
if ( d & 1 )
*( pointer + count ) = 1 + '0';
else
*( pointer + count ) = 0 + '0';
count++;
}
*( pointer + count ) = '\0';
return pointer;
}
int binary_decimal(long long int n); /* Define function */
int binary_decimal(long long int n) { /* Function to convert binary to decimal.*/
int decimal=0, i=0, rem;
while (n!=0) {
rem = n%10;
n/=10;
decimal += rem*pow(2,i);
++i;
}
return decimal;
}
int main(void) {
RECORD *ptr_record;
ptr_record = (RECORD *) malloc (sizeof(RECORD));
if (ptr_record == NULL) {
printf("Out of memmory!\nExit!\n");
exit(0);
}
int LI_d = 1;
char *LI_b = decimal_to_binary(LI_d,32);
memset( (*ptr_record).transmit , '\0' , sizeof((*ptr_record).transmit) );
strncat((*ptr_record).transmit , LI_b , strlen(LI_b) );
printf("LI: %s\n",(*ptr_record).transmit);
//transmit and receive
memset( (*ptr_record).receive , '\0' , sizeof((*ptr_record).receive) );
strncpy( (*ptr_record).receive , (*ptr_record).transmit , strlen((*ptr_record).transmit) );
char *LI_rcv_b = strndup( (*ptr_record).receive , 64 );
int LI_rcv_i = atoi (LI_rcv_b);
int final_LI = binary_decimal(LI_rcv_i);
printf("Final_LI: %i\n",final_LI);
free( ptr_record );
return 0;
}
Sample of output for 32 bit conversion:
LI: 00000000000000000000000000000001
Final_LI: 1
Sample of output for 64 bit conversion:
LI: 0000000000000000000000000000000100000000000000000000000000000001
Final_LI: -1
decimal_to_binary(int n, ...): better to use unsigned math
//char *decimal_to_binary(int n, int num) {
char *decimal_to_binary(unsigned long long n, int num) {
// long long int c, d, count;
unsigned long long int d;
int c, count;
char *pointer;
count = 0;
pointer = malloc(num + 1); // drop cast
if (pointer == NULL)
exit(EXIT_FAILURE);
for (c = num - 1; c >= 0; c--) {
d = n >> c;
if (d & 1)
*(pointer + count) = 1 + '0';
else
*(pointer + count) = 0 + '0';
count++;
}
*(pointer + count) = '\0';
return pointer;
}
Simplify binary_decimal(). Again use unsigned math, drop pow()
/* Function to convert binary to decimal.*/
unsigned long binary_decimal(unsigned long long int n) {
unsigned long decimal = 0;
while (n != 0) {
decimal *= 2;
decimal += n % 10;
n /= 10;
}
return decimal;
}
main() has lots of issues
int main(void) {
RECORD *ptr_record;
ptr_record = malloc(sizeof(RECORD)); // drop cast
if (ptr_record == NULL) {
printf("Out of memory!\nExit!\n"); // spelling fix
exit(0);
}
// use unsigned long long
unsigned long long LI_d = 1;
LI_d = (unsigned long long) -1;
char *LI_b = decimal_to_binary(LI_d, 32);
memset((*ptr_record).transmit, '\0', sizeof((*ptr_record).transmit));
// strncat((*ptr_record).transmit, LI_b, strlen(LI_b));
strncat((*ptr_record).transmit, LI_b, sizeof((*ptr_record).transmit) - 1);
printf("LI: %s\n", (*ptr_record).transmit);
//transmit and receive
memset((*ptr_record).receive, '\0', sizeof((*ptr_record).receive));
// strncpy((*ptr_record).receive, (*ptr_record).transmit, strlen((*ptr_record).transmit));
strncpy((*ptr_record).receive, (*ptr_record).transmit, sizeof((*ptr_record).transmit) - 1);
// char *LI_rcv_b = strndup((*ptr_record).receive, 64);
char *LI_rcv_b = strndup((*ptr_record).receive, MAX_CHARACTERS);
// At this point, approach is in error
// Cannot take a 64-decimal digit string and convert to a typical long long.
// int LI_rcv_i = atoi(LI_rcv_b);
// int final_LI = binary_decimal(LI_rcv_i);
// printf("Final_LI: %i\n", final_LI);
// Suspect you want to convert 64-binary digit string to a 64-bit integer
// maybe by somehow using binary_decimal - suggest re-write of that function
unsigned long long LI_rcv_i = strtoull(LI_rcv_b, NULL, 2);
printf("Final_LI: %llu\n", LI_rcv_i);
free(ptr_record);
return 0;
}
Output
LI: 11111111111111111111111111111111
Final_LI: 4294967295

32 bit binary-to-decimal conversion

I have this code to get from binary to decimal:
#include <stdio.h>
#include <math.h>
#include <stdint.h>
int main() {
printf("%lld\n", binaryToDecimal(11110000111100001111000011110000));
return 1;
}
long long binaryToDecimal(long long binary) {
int power = 0;
return binaryToDecimalHelper(binary, power);
}
long long binaryToDecimalHelper(long long binary, int power) {
if (binary != 0) {
long long i = binary % (double)10;
return (i * pow(2, power))
+ binaryToDecimalHelper(binary / 10, power + 1);
} else {
return 0;
}
}
It works fine for small values (up to 16 bits) , but for 32 bits (which is what I need) it just returns garbage.
The number 11110000111100001111000011110000 is of type int, which can't hold a number as big as 11110000111100001111000011110000 in your machine. It's better to use a string representation instead ("11110000111100001111000011110000") and adjust your algorithm, if you can.
if you are limited to 32 bits maximum this is one example:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void setStr(char *c, const char * x)
{
int i = 0;
while(x[i] != '\0')
{
c[i] = x[i];
i++;
}
}
void prepend(char* s, const char* t)
{
size_t len = strlen(t);
size_t i;
memmove(s + len, s, strlen(s) + 1);
for (i = 0; i < len; ++i)
{
s[i] = t[i];
}
}
int main(int argc, char const *argv[])
{
char *x = malloc(33*sizeof(char));
setStr(x, "111");
while (strlen(x) < 31) // not really necessary, but will help to 'cut' bytes if necessary
{
prepend(x,"0");
}
printf("%s\n", x);
int i = strtol(x,NULL,2);
printf("%d\n",i);
free(x);
return 0;
}
The first thing to be clear on is that your code does not convert anything to decimal, but rather to an int. Conversion to a decimal character string representation of that integer is performed by the printf() call.
The literal constant 11110000111100001111000011110000 is interpreted by the compiler (or would be if it were not so large) as a decimal value, and as such will require 104 bits to store;
i.e. log10(1111000011110000111100001111000010) / log10(210)
Representing a binary value with a decimal integer containing only 1 and 0 digits does not make much mathematical sense - though it may be convenient for small integers. A 64 bit unsigned long long is good for only 20 decimal digits (using just 1 and 0 - it can represent all 19 digit positive decimal integers, and some 20 digit values);
i.e. log10(210) * 6410
If you need longer binary values, then you should probably use a string representation. This is in fact simpler and more efficient in any case - you can use the fact that the machine representation of integers already is binary:
#include <stdio.h>
#include <stdint.h>
uint64_t binstrToInt( const char* binstr )
{
uint64_t result = 0 ;
int bit = 0;
while( binstr[bit] != '\0' )
{
if( binstr[bit] == '1' )
{
result |= 1 ;
}
bit++ ;
if( binstr[bit] != '\0' )
{
result <<= 1 ;
}
}
return result ;
}
int main()
{
printf("%llu\n", binstrToInt( "11110000111100001111000011110000" ) ) ;
return 0 ;
}
This would be easiest by far with a string as input instead of an int, and would allow longer numbers. Your problem is probably being caused by integer overflow.
String version:
#include <math.h>
#include <stdio.h>
#include <string.h>
int main() {
const char * const string_to_convert = "1010"
int result = 0;
for( int i = strlen(string_to_convert) - 1; i >= 0; --i ) {
if( string_to_convert[i] == '1' ) {
// Careful with pow() -- returns double, may round incorrectly
result += (int)pow( 10.0d, (double)i )
}
}
fprintf( stdout, "%d", result );
return 0;
}
Also, I'm not sure what the point of the return 1 is. Usually a non-zero return value from main indicates an error.

Reading/Parsing Command-Line Arguments in C; binary and decimal conversions

I have to write a program in C that reads and parses different command-line arguments but I have no idea where to start. All I have write now is the usage:
usage:
binary OPTION SIZE NUMBER
OPTION:
-b NUMBER is binary and output will be in decimal.
-d NUMBER is decimal and output will be in binary.
SIZE:
-8 input is an unsigned 8-bit integer.
-16 input is an unsigned 16-bit integer.
-32 input is an unsigned 32-bit integer.
-64 input is an unsigned 64-bit integer.
NUMBER:
number to be converted.
Other than this, I am not sure how to get user input and go about with the conversions. Any help would be great!
You can take the following code-snippet and work it on from there, but please note:
I do not see any need to make use of the SIZE argument. If this argument is essential for your exercise (or any other reason behind this question), then you'll need to think how you want to use it.
I do not perform any assertion on the input number, so the code below assumes a legal decimal input number when OPTION = -d and a legal binary input number when OPTION = -d.
There is probably more than one way to implement it, and the code below is merely an example.
#include <stdio.h>
#include <string.h>
unsigned long long str_to_ull(char* str,int base)
{
int i;
unsigned long long ull = 0;
for (i=0; str[i] != 0; i++)
{
ull *= base;
ull += str[i]-'0';
}
return ull;
}
void print_ull(unsigned long long ull,int base)
{
if (ull/base > 0)
print_ull(ull/base,base);
printf("%d",ull%base);
}
int main(int argc,char* argv[])
{
char* OPTION;
char* SIZE ;
char* NUMBER;
unsigned long long number = 0;
if (argc < 4)
{
printf("Missing input arguments\n");
return -1;
}
OPTION = argv[1];
SIZE = argv[2];
NUMBER = argv[3];
if (strcmp(OPTION,"-b") == 0)
{
number = str_to_ull(NUMBER,2);
print_ull(number,10);
return 0;
}
if (strcmp(OPTION,"-d") == 0)
{
number = str_to_ull(NUMBER,10);
print_ull(number,2);
return 0;
}
printf("Invalid input arguments\n");
return -1;
}
Here is simple example code to enter number from command-line and convert that decimal-integer to binary.
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
//Variable declarations.
int i; //Holds input decimal number.
int t; //Holds temp mod i%2
int c = 0; //Holds the bit count.
char b[2] = ""; //Hold the temp binary bit string to be append.
char bin[64] = ""; //Holds the final binary string
//Check if the syntax is proper.
if (argc != 2)
{
printf("\nSYNTAX: binary <decimal>\n");
return 1;
}
//Convert the input string parameret into decimal.
i = atoi(argv[1]);
//reset the flags.
t = 0;
while (i != 0)
{
t = i % 2;
i = i / 2;
//convert int decimal (base 10) to string.
itoa(t, b, 10);
//Concat to main binary string.
strcat(bin, b);
c++;
}
//reverse the string.
strrev(bin);
printf("\nBinary: %d(10) = %s(2)\n",i, bin);
return 0;
}

Binary to decimal in c

I have a simple code to convert binary to decimal numbers. In my compiler, the decomposition works just fine for number less than 1000, beyond the output is always the same 1023. Anybody has an idea ?
#include <stdio.h>
#include <stdlib.h>
// how many power of ten is there in a number
// (I don't use the pow() function to avoid trouble with floating numbers)
int residu(int N)
{
int i=0;
while(N>=1){
N=N/10;
i++;
}
return i;
}
//exponentiating a number a by a number b
int power(int a, int b){
int i;
int res=1;
for (i=0;i<b;i++){res=a*res;}
return res;
}
//converting a number N
int main()
{
int i;
//the number to convert
int N;
scanf("%d",&N);
//the final decimal result
int res=0;
//we decompose N by descending powers of 10, and M is the rest
int M=0;
for(i=0;i<residu(N);i++){
// simple loop to look if there is a power of (residu(N)-1-i) in N,
// if yes we increment the binary decomposition by
// power(2,residu(N)-1-i)
if(M+ power(10,residu(N)-1-i) <= N)
{
M = M+power(10,residu(N)-1-i);
res=power(2,residu(N)-1-i)+res;
}
}
printf("%d\n",res);
}
Yes try this :
#include <stdio.h>
int main(void)
{
char bin; int dec = 0;
while (bin != '\n') {
scanf("%c",&bin);
if (bin == '1') dec = dec * 2 + 1;
else if (bin == '0') dec *= 2; }
printf("%d\n", dec);
return 0;
}
Most likely this is because you are using an int to store your binary number. An int will not store numbers above 2^31, which is 10 digits long, and 1023 is the largest number you can get with 10 binary digits.
It would be much easier for you to read your input number as a string, and then process each character of the string.
After a little experimentation, I think that your program is intended to accept a number consisting of 1's and 0's only as a base-10 number (the %d reads a decimal number). For example, given input 10, it outputs 2; given 1010, it outputs 10; given 10111001, it outputs 185.
So far, so good. Unfortunately, given 1234, it outputs 15, which is a little unexpected.
If you are running on a machine where int is a 32-bit signed value, then you can't enter a number with more than 10 digits, because you overflow the limit of a 32-bit int (which can handle ±2 billion, in round terms). The scanf() function doesn't handle overflows well.
You could help yourself by echoing your inputs; this is a standard debugging technique. Make sure the computer got the value you are expecting.
I'm not going to attempt to fix the code because I think you're going about the problem in completely the wrong way. (I'm not even sure whether it's best described as binary to decimal, or decimal to binary, or decimal to binary to decimal!) You would do better to read the input as a string of (up to 31) characters, then validate that each one is either a 0 or a 1. Assuming that's correct, then you can process the string very straight-forwardly to generate a value which can be formatted by printf() as a decimal.
Shift left is the same than multiply by 2 and is more efficient, so I think it is a more c-like answer:
#include <stdio.h>
#include <stdlib.h>
int bin2int(const char *bin)
{
int i, j;
j = sizeof(int)*8;
while ( (j--) && ((*bin=='0') || (*bin=='1')) ) {
i <<= 1;
if ( *bin=='1' ) i++;
bin++;
}
return i;
}
int main(void)
{
char* input = NULL;
size_t size = 0;
while ( getline(&input, &size, stdin) > 0 ) {
printf("%i\n", bin2int(input));
}
free(input);
}
#include <stdio.h> //printf
#include <string.h> //strlen
#include <stdint.h> //uintX_t or use int instead - depend on platform.
/* reverse string */
char *strrev(char *str){
int end = strlen(str)-1;
int start = 0;
while( start<end ){
str[start] ^= str[end];
str[end] ^= str[start];
str[start] ^= str[end];
++start;
--end;
}
return str;
}
/* transform binary string to integer */
uint32_t binstr2int(char *bs){
uint32_t ret = 0;
uint32_t val = 1;
while(*bs){
if (*bs++ == '1') ret = ret + val;
val = val*2;
}
return ret;
}
int main(void){
char binstr[] = "1010101001010101110100010011111"; //1428875423
printf("Binary: %s, Int: %d\n", binstr, binstr2int(strrev(binstr)));
return 0;
}

Resources