So I'm working on creating a precise Decimal structure that stores its characteristic and mantissa in a long and an unsigned long respectively. Because of this I've had to write my own addition and subtraction functions.
While testing my functions I ran across the troublesome problem of "negative zero". Essentially, I cannot represent -0.1 through -0.9 because there is no way for me to put a negative sign on my zero without using a flag value of some kind. This is background information, and I'll post the code so you can see how I'm doing the arithmetic. The STRANGE behavior, though, is that I'm getting a number above ULONG_MAX. Specifically, this is the output of my log:
diff->right: 18446744073699551616
b->right10000000
MANTISSA_LIMIT: 100000000
ULONG_MAX: 18446744073709551615
Subtracting 10.10000000 from 10.00000000
Test: tests/bin/decimal.out(subtractDecimalsWithCarry+0x79) [0x40109f] Decimal: 0.10000000
And the code:
helpers/decimal.h:
#ifndef __DECIMAL_H__
#include <limits.h>
#define MANTISSA_LIMIT 100000000
#define __DECIMAL_H__
typedef struct{ /* Calling them more convenient terms: */
long left; /* characteristic */
unsigned long right; /* mantissa */
}Decimal;
void createDecimal(long left, unsigned long right, Decimal * dec);
/* Perform arithmetic operations on Decimal structures */
void add_decimals(Decimal* a, Decimal* b, Decimal* sum);
void subtract_decimals(Decimal* a, Decimal* b, Decimal* diff);
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
void createDecimalFromString(Decimal * dec, const char * str);
#endif
And then decimal.c's relevant code:
/* Subtract two decimals, a - b */
void subtract_decimals(Decimal* a, Decimal* b, Decimal* diff){
diff->left = a->left - b->left;
diff->right = a->right - b->right;
fprintf(stderr, "diff->right: %lu\n", diff->right);
fprintf(stderr, "b->right%lu\n", b->right);
fprintf(stderr, "MANTISSA_LIMIT: %d\n", MANTISSA_LIMIT);
fprintf(stderr, "ULONG_MAX: %lu\n", ULONG_MAX);
if(diff->right > MANTISSA_LIMIT) {
if(diff->right != 18446744073699551616UL)
diff->left -= 1;
else
diff->left *= -1; /* This is where I might put a flag for -0*/
diff->right = ULONG_MAX - diff->right + (18446744073699551616UL == diff->right ? 1 : 0); /* +1 because of the wrap around, we have to 'count' 0. */
}
}
void createDecimalFromString(Decimal * dec, const char * str){
long left;
unsigned long right;
char * dotLocation;
char rawLeft[9];
char rawRight[9];
int i;
int dotPos;
long leadingZeros;
int numDetected;
if(str == NULL)
return;
bzero(rawLeft,9);
bzero(rawRight,9);
dotLocation = strstr(str, ".");
leadingZeros = numDetected = 0;
if(dotLocation == NULL){
left = atol(str);
right = 0;
}else{
/* ghetto strncpy */
for(i=0; i != 9 && str[i] != *dotLocation; ++i)
rawLeft[i] = str[i];
rawLeft[i] = '\0';
dotPos = i+1;
left = atol(rawLeft);
for(i=0; i != 9 && str[dotPos] != '\0'; ++i,++dotPos){
if(str[dotPos] == '0' && numDetected == 0)
leadingZeros++;
else
numDetected = 1;
rawRight[i] = str[dotPos];
}
rawRight[i] = '\0';
right = strtoul(rawRight,NULL,10);
if(leadingZeros > 0)
/* subtract the leading zeros, then also the powers of ten taken by the number itself*/
right = (right*(powlu(10,7-leadingZeros-(i-2))));
else
right = right*(powlu(10,(i > 1 ? 8-(i-1) : 7 )));
}
dec->left = left;
dec->right = right;
}
And finally the calling code:
#include <stdio.h>
#include <stdlib.h>
#include <execinfo.h>
#include <unistd.h>
#include "helpers/decimal.h"
void traceAndPrintDecimal(Decimal testDec){
int nptrs;
void *buffer[100];
char **strings;
nptrs = backtrace(buffer, 100);
strings = backtrace_symbols(buffer, nptrs);
printf("Test: %s Decimal: %ld.%08lu\n", strings[1], testDec.left, testDec.right);
free(strings);
}
void subtractDecimalsWithCarry(){
Decimal oper1;
Decimal oper2;
Decimal result;
createDecimalFromString(&oper1, "10.0");
createDecimalFromString(&oper2, "10.1");
subtract_decimals(&oper1, &oper2, &result);
printf("Subtracting %ld.%08lu from %ld.%08lu\n",oper2.left,oper2.right,oper1.left,oper1.right);
traceAndPrintDecimal(result);
}
int main(){
subtractDecimalsWithCarry();
return 0;
}
And the piece of my makefile for compiling:
decimal.o: src/helpers/decimal.c
cc -I./headers -std=gnu99 -pedantic -Wall -Wextra -Werror -g -c src/helpers/decimal.c -o obj/decimal.o
test-decimal: tests/decimal-test.c decimal.o
cc -I./headers -std=gnu99 -pedantic -Wall -Wextra -Werror -g tests/decimal-test.c obj/decimal.o -o tests/bin/decimal.out -lm -rdynamic
It's strange that diff->right is larger than ULONG_MAX, does anyone know why this might be? If you need anymore information let me know and I'll do my best to update the question.
Mistaken "number above ULONG_MAX"
At first glance diff->right with the value 18446744073699551616 appears to be larger than ULONG_MAX (18446744073709551615). But it is actually 9999999 less. (#UncleO)
OP asserts in a comment: "any idea why the ones place goes a little cockeyed? The number should only be off by 1000000 because of the way the mantissa works. But it's off by 10000001 instead". I suggest this is incorrect.
// from createDecimalFromString(&oper1, "10.0");
oper1.right = 0
// from createDecimalFromString(&oper2, "10.1");
oper2.right = 10000000
// from subtract_decimals(&oper1, &oper2, &result)
diff->right = oper1.right - oper2.right --> 18446744073699551616
Unsigned subtraction is well defined in C. In this case the difference oper1.right - oper2.right will mathematically result in oper1.right - oper2.right + (ULONG_MAX + 1).
" ... a result that cannot be represented by the resulting unsigned integer type is reduced modulo the number that is one greater than the largest value that can be represented by the resulting type." C11 6.2.5 8
Related
I am trying to call the "fpow" C function from Python with an "fpow" wrapper. It works, but unfortunately both precision and accuracy are lacking. I guess that my use of mpfr is incorrect. How can I solve this?
numeric.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <gmp.h>
#include <mpfr.h>
#include "numeric.h"
char *fpow(char *x, char *y, unsigned long long prec)
{
mpfr_t a, b;
mpfr_init2(a, prec);
mpfr_init2(b, prec);
mpfr_set_str(a, x, 10, 0);
mpfr_set_str(b, y, 10, 0);
mpfr_pow(a, a, b, 1);
long dot;
char *str = malloc(sizeof(char) * (prec + 1));
mpfr_get_str(str, &dot, 10, prec, a, 1);
for(long j = dot; j < dot; j--)
str[j + 1] = str[j];
str[dot] = '.';
mpfr_clear(a);
mpfr_clear(b);
mpfr_free_cache();
return str;
}
basics.py
import ctypes as c
cfunc = c.CDLL('numeric.so')
def fpow(base: str, exponent: str, precision: int) -> str:
'''base to the exponent with the specified precision'''
cfunc.fpow.argtypes = (c.c_char_p, c.c_char_p, c.c_ulonglong)
cfunc.fpow.restype = c.c_char_p
return (
cfunc.fpow(
c.create_string_buffer(base.encode('utf-8')),
c.create_string_buffer(exponent.encode('utf-8')),
precision
)).decode('utf-8')
test.py
import Numer.basics as nm
print(nm.fpow('5.1', '3', 100))
Output of the test.py is "132.509999999999999999999999997382748843093935872477183435247383158639422617852687835693359375000000"
Your for loop has a bug. It starts with j equal to dot, so it will execute zero times. And, it is decrementing j.
Strings in C need an extra 0x00 char at the end to denote "end of string".
The malloc allocates space for prec chars plus 1 extra char. This is enough for the mpfr_pow output [with the 0x00 EOS string terminator].
But this does not allocate space for the . we are trying to add. So, we need to increase the amount allocated by malloc.
The [redacted] raw output of mpfr_pow is:
132650999999999999999999999999738274884309393587247718343524738315 ...
But, the output of your function is:
132.50999999999999999999999999738274884309393587247718343524738315 ...
Notice that you're trashing the [first] 6 with .
Here's a refactored version of the C code. I added main to be able to test independently of the python code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <gmp.h>
#include <mpfr.h>
//#include "numeric.h"
char *
fpow(char *x, char *y, unsigned long long prec)
{
mpfr_t a, b;
mpfr_init2(a, prec);
mpfr_init2(b, prec);
mpfr_set_str(a, x, 10, 0);
mpfr_set_str(b, y, 10, 0);
mpfr_pow(a, a, b, 1);
long dot;
// NOTE/BUG: allocates enough space for 0x00 EOS char but _not_ the added
// '.' character
#if 0
char *str = malloc(sizeof(char) * (prec + 1));
#else
char *str = malloc(sizeof(char) * (prec + 1 + 1));
#endif
mpfr_get_str(str, &dot, 10, prec, a, 1);
// original code ...
#if 0
for (long j = dot; j < dot; j--)
str[j + 1] = str[j];
str[dot] = '.';
// refactored code ...
#else
int prev = str[dot];
int next;
char *cur;
for (cur = &str[dot + 1]; *cur != 0; ++cur) {
next = *cur;
*cur = prev;
prev = next;
}
*cur++ = prev;
*cur = 0;
str[dot] = '.';
#endif
mpfr_clear(a);
mpfr_clear(b);
mpfr_free_cache();
return str;
}
int
main(void)
{
char *ret = fpow("5.1","3",100);
printf("%s\n",ret);
return 0;
}
UPDATE:
I have 2 questions. First, is there a reason not to fix the for loop bug like this: for (long j = dot; j < prec; j--) str[j + 1] = str[j]; str[dot] = '.'; This seems to get the work done as well.
It didn't work for me. Of course, there are different ways to do the insert. Mine [which works] is but one. Feel free to experiment. You could isolate the insertion code into a separate function [and try different functions].
The second question is that as you can probably try and see "prec" doesn't really work e.g. when i put prec=20 it doesn't give 20 digits accurately. – aras edeş
That's because for mpfr_init2, the second argument is not the number of decimal digits. It is the number of bits.
So, roughly, to get a given number of decimal digits of precision, we need about 4 bits per digit (e.g. to represent the digit 9 we need 4 bits).
So, change:
mpfr_init2(a, prec);
mpfr_init2(b, prec);
Into:
mpfr_init2(a, prec * 4);
mpfr_init2(b, prec * 4);
Note that we could calculate the needed precision exactly but I think that's overkill in the given use case.
With that change, the redacted output is:
132.65099999999999999999999999999999999999999999999999999999999999 ...
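If you want to size the precision exactly instead of using the 4x rule of thumb, one option (my own sketch, not part of the answer; digits_to_bits is a hypothetical helper) is to convert decimal digits to bits with log2(10) ≈ 3.32, plus a few guard bits:

#include <math.h>
#include <mpfr.h>

/* Hypothetical helper: bits required for 'digits' decimal digits, plus a few guard bits. */
static mpfr_prec_t digits_to_bits(unsigned long long digits)
{
    return (mpfr_prec_t)ceil((double)digits * 3.3219280948873623) + 8;
}

/* usage inside fpow:
     mpfr_init2(a, digits_to_bits(prec));
     mpfr_init2(b, digits_to_bits(prec));
*/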
Right now I am trying to convert an int to a char in C programming. After doing research, I found that I should be able to do it like this:
int value = 10;
char result = (char) value;
What I would like is for this to return 'A' (and for 0-9 to return '0'-'9') but this returns a new line character I think.
My whole function looks like this:
char int2char (int radix, int value) {
if (value < 0 || value >= radix) {
return '?';
}
char result = (char) value;
return result;
}
To convert an int to a char you do not have to do anything:
char x;
int y;
/* do something */
x = y;
If you only want to convert one int value to the printable (usually ASCII) digit character, as in your example:
const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
int inttochar(int val, int base)
{
return digits[val % base];
}
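For example, assuming the digits[] table and inttochar() above, this prints 'A' and then '7':

printf("%c\n", inttochar(10, 16)); /* A */
printf("%c\n", inttochar(7, 10));  /* 7 */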
If you want to convert to a string (char *), then you need to use one of the standard functions like sprintf, itoa, ltoa, utoa, ultoa ... or write one yourself:
char *reverse(char *str);
const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
char *convert(int number, char *buff, int base)
{
char *result = (buff == NULL || base > strlen(digits) || base < 2) ? NULL : buff;
char sign = 0;
if (number < 0)
{
sign = '-';
}
if (result != NULL)
{
do
{
*buff++ = digits[abs(number % (base ))];
number /= base;
} while (number);
if(sign) *buff++ = sign;
if (!*result) *buff++ = '0';
*buff = 0;
reverse(result);
}
return result;
}
A portable way of doing this would be to define a
const char* foo = "0123456789ABC...";
where ... are the rest of the characters that you want to consider.
Then foo[value] will evaluate to a particular char. For example foo[0] will be '0', and foo[10] will be 'A'.
If you assume a particular encoding (such as the common but by no means ubiquitous ASCII) then your code is not strictly portable.
Characters use an encoding (typically ASCII) to map numbers to a particular character. The codes for the characters '0' to '9' are consecutive, so for values less than 10 you add the value to the character constant '0'. For values 10 or more, you add the value minus 10 to the character constant 'A':
char result;
if (value >= 10) {
result = 'A' + value - 10;
} else {
result = '0' + value;
}
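Putting that together with the bounds check from the question, a minimal complete int2char could look like this (my sketch, not the answerer's exact code):

#include <stdio.h>

char int2char(int radix, int value) {
    if (value < 0 || value >= radix)
        return '?';                      /* out of range for this radix */
    if (value >= 10)
        return (char)('A' + value - 10); /* 10..35 -> 'A'..'Z' */
    return (char)('0' + value);          /* 0..9 -> '0'..'9' */
}

int main(void) {
    printf("%c %c %c\n", int2char(16, 10), int2char(10, 7), int2char(8, 9));
    /* prints: A 7 ? */
    return 0;
}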
Converting Int to Char
I take it that OP wants more than just a 1 digit conversion, since a radix was supplied.
To convert an int into a string (not just 1 char), there is the sprintf(buf, "%d", value) approach.
To do so in any radix, string management becomes an issue, as does handling the corner case of INT_MIN.
The following C99 solution returns a char* whose lifetime is valid to the end of the block. It does so by providing a compound literal via the macro.
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
// Maximum buffer size needed
#define ITOA_BASE_N (sizeof(unsigned)*CHAR_BIT + 2)
char *itoa_base(char *s, int x, int base) {
s += ITOA_BASE_N - 1;
*s = '\0';
if (base >= 2 && base <= 36) {
int x0 = x;
do {
*(--s) = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"[abs(x % base)];
x /= base;
} while (x);
if (x0 < 0) {
*(--s) = '-';
}
}
return s;
}
#define TO_BASE(x,b) itoa_base((char [ITOA_BASE_N]){0} , (x), (b))
Sample usage and tests
void test(int x) {
printf("base10:% 11d base2:%35s base36:%7s ", x, TO_BASE(x, 2), TO_BASE(x, 36));
printf("%ld\n", strtol(TO_BASE(x, 36), NULL, 36));
}
int main(void) {
test(0);
test(-1);
test(42);
test(INT_MAX);
test(-INT_MAX);
test(INT_MIN);
}
Output
base10: 0 base2: 0 base36: 0 0
base10: -1 base2: -1 base36: -1 -1
base10: 42 base2: 101010 base36: 16 42
base10: 2147483647 base2: 1111111111111111111111111111111 base36: ZIK0ZJ 2147483647
base10:-2147483647 base2: -1111111111111111111111111111111 base36:-ZIK0ZJ -2147483647
base10:-2147483648 base2: -10000000000000000000000000000000 base36:-ZIK0ZK -2147483648
Ref How to use compound literals to fprintf() multiple formatted numbers with arbitrary bases?
Check out the ASCII table.
The values stored in a char are interpreted as the characters corresponding to that table. The value 10 is a newline.
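A quick way to see this (a small sketch, not from the answer):

#include <stdio.h>

int main(void)
{
    printf("%d\n", '\n');       /* 10: the character code of newline */
    printf("%d %c\n", 'A', 65); /* 65 A (with an ASCII character set) */
    return 0;
}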
So characters in C are based on ASCII (or UTF-8, which is backwards-compatible with ASCII codes). This means that under the hood, 'A' is actually the number 65 (stored in binary rather than decimal). A char in C is simply an integer type with enough bits to represent every ASCII character. If you want an int to display as the matching digit or letter, you have to map the value to a character code yourself; a plain cast or assignment only reinterprets the number as a character code (and the compiler may warn about the narrowing, since char is smaller than int). This means we need a function, as you've written. Thus,
if(value < 10) return '0'+value;
return 'A'+value-10;
will be what you want to return from your function. Keep your bounds checks with "radix" as you've done, imho that is good practice in C.
1. Converting int to char by type casting
Source File charConvertByCasting.c
#include <stdio.h>
int main(){
int i = 66; // ~~Type Casting Syntax~~
printf("%c", (char) i); // (type_name) expression
return 0;
}
Executable charConvertByCasting.exe command line output:
C:\Users\boqsc\Desktop\tcc>tcc -run charconvert.c
B
Additional resources:
https://www.tutorialspoint.com/cprogramming/c_type_casting.htm
https://www.tutorialspoint.com/cprogramming/c_data_types.htm
2. Convert int to char by assignment
Source File charConvertByAssignment.c
#include <stdio.h>
int main(){
int i = 66;
char c = i;
printf("%c", c);
return 0;
}
Executable charConvertByAssignment.exe command line output:
C:\Users\boqsc\Desktop\tcc>tcc -run charconvert.c
B
You can do
char a;
a = '0' + 5;
You will get character representation of that number.
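A tiny sketch of that, assuming you then print it:

#include <stdio.h>

int main(void)
{
    char a = '0' + 5;
    printf("%c\n", a); /* prints 5 */
    return 0;
}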
Borrowing the idea from the existing answers, i.e. making use of an array index.
Here is a "just works" simple demo for "integer to char[]" conversion in base 10, without any of <stdio.h>'s printf family interfaces.
Test:
$ cc -o testint2str testint2str.c && ./testint2str
Result: 234789
Code:
#include <stdio.h>
#include <string.h>
static char digits[] = "0123456789";
void int2str (char *buf, size_t sz, int num);
/*
Test:
cc -o testint2str testint2str.c && ./testint2str
*/
int
main ()
{
int num = 234789;
char buf[1024] = { 0 };
int2str (buf, sizeof buf, num);
printf ("Result: %s\n", buf);
}
void
int2str (char *buf, size_t sz, int num)
{
/*
Convert integer type to char*, in base-10 form.
*/
char *bufp = buf;
int i = 0;
// NOTE-1
void __reverse (char *__buf, int __start, int __end)
{
char __bufclone[__end - __start];
int i = 0;
int __nchars = sizeof __bufclone;
for (i = 0; i < __nchars; i++)
{
__bufclone[i] = __buf[__end - 1 - i];
}
memmove (__buf, __bufclone, __nchars);
}
while (num > 0)
{
bufp[i++] = digits[num % 10]; // NOTE-2
num /= 10;
}
__reverse (buf, 0, i);
// NOTE-3
bufp[i] = '\0';
}
// NOTE-1:
// "Nested function" is GNU's C Extension. Put it outside if not
// compiled by GCC.
// NOTE-2:
// 10 can be replaced by any radix, like 16 for hexidecimal outputs.
//
// NOTE-3:
// Make sure inserting trailing "null-terminator" after all things
// done.
I was making a binary adder in C using only logic gates. For example, I wanted to add 4 + (-5), get the answer in 2's complement, and then convert it to decimal. In the same way, if I do 4 + (-3) I would get the answer in binary and would like to use the same function to convert it to decimal.
Now, I know how to convert a 2's complement number into decimal, and how to convert binary into decimal. But I want to use the same function to convert both 2's complement and binary into decimal. To do that, I have to figure out whether the number is binary or 2's complement. That is where I am stuck.
Can someone give me an idea, algorithm or code in C to find out whether a number is 2's complement or normal binary?
SOURCE CODE
Chips
// Author: Ashish Ahuja
// Date created: 8-1-2016
// Descriptions: This file stores all the chips for
// the nand2tetris project.
// Links: www.nand2tetris.org
// class.coursera.org/nand2tetris1-001
// Files needed to compile successfully: ourhdr.h
int not (unsigned int a) {
if (a == 1) {
return 0;
}
else if (a == 0) {
return 1;
}
}
int and (unsigned int a, unsigned int b) {
if (a == 1 && b == 1)
return 1;
else if ((a == 1 && b == 0) || (a == 0 && b == 1) || (a == 0 && b == 0))
return 0;
}
int nand (unsigned int a, unsigned int b) {
unsigned int ans = 10;
ans = and (a, b);
unsigned int ack = not (ans);
return ack;
}
int or (unsigned int a, unsigned int b) {
return (nand (not (a), not (b)));
}
int nor (unsigned int a, unsigned int b) {
return (not (or (a, b)));
}
int xor (unsigned int a, unsigned int b) {
unsigned int a_r;
unsigned int b_r;
unsigned int sra;
unsigned int srb;
a_r = not (a);
b_r = not (b);
sra = nand (a_r, b);
srb = nand (b_r, a);
return nand (sra, srb);
}
int xnor (unsigned int a, unsigned int b) {
return (not (xor (a,b)));
}
Ourhdr.h
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <signal.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <math.h>
#include <time.h>
#include <stdbool.h>
#include <termios.h>
#include <stddef.h>
#include <sys/types.h>
#include <my/signal.h>
#include <my/socket.h>
#include <my/io.h>
#include <my/lib.h>
#include <my/tree.h>
#include <my/bits.h>
#include <my/binary.h>
//#include <my/error.h>
#define MAXLINE 4096
#define BUFF_SIZE 1024
Note: I am only going to show the headers needed by this project, so treat the other headers as irrelevant here.
Function to Convert array to integer
int array_num (int arr [], int n) {
char str [6] [2];
int i;
char number [13] = {'\n'};
for (i = 0; i < n; i ++)
sprintf (str [i], "%d", arr [i]);
for (i = 0; i < n; i ++)
strcat (number, str [i]);
i = atoi (number);
return i;
}
Function to get the bits of an int and return a pointer to an array containing those bits
int *get_bits (int n, int bitswanted) {
int *bits = malloc (sizeof (int) * bitswanted);
int k;
int mask;
int masked_n;
int thebit;
for (k = 0; k < bitswanted; k ++) {
mask = 1 << k;
masked_n = n & mask;
thebit = masked_n >> k;
bits [k] = thebit;
}
return bits;
}
Function to convert binary to decimal, and vice-versa
int convert_num (int n, int what) {
int rem;
int i;
int binary = 0;
int decimal = 0;
switch (what) {
case 0: // Convert decimal to binary
i = 0;
rem = 0;
while (n != 0) {
rem = n % 2;
n /= 2;
binary += rem * i;
i *= 10;
}
return binary;
break;
case 1: // Convert binary to decimal
i = 0;
rem = 0;
while (n != 0) {
rem = n % 10;
n /= 10;
decimal += rem*pow (2, i);
i ++;
}
return decimal;
break;
}
}
Main program design
Read two numbers n1 and n2 from user
Get pointers bits1 and bits2 which point to arrays holding the bits of n1 and n2. Note that the arrays will be in reverse order, i.e., the last bit will be in the 0th element of the array.
Write a for loop in which you pass three variables: the two bits you want to add and the carry from the previous bit addition.
The return value will be the sum of the two bits and the carry, and the carry will be updated after the addition (if any). E.g. you pass 1 and 0, and carry is 1, so the return will be 0 and carry will again be set to 1.
The return will be stored in another array called sum.
The array sum will be converted to an int using the function I have given above.
Now this is where I am stuck. I want to change the int into a decimal number. But to do that, I must know whether it is in the form of a 2's complement number or just normal binary. I do not know how to do that.
NOTE: The nand2tetris project is done in HDL, but I wanted to do it in C. Also, many of the functions I have mentioned above have been taken from Stack Overflow, although the design is my own.
Both are binary. The difference is signed versus unsigned. For values > 0 they look the same. For values < 0 you can see that it is a negative number just by looking at the highest bit. Using the same function for output can easily be done by looking at the highest bit: if it is set, output '-' and convert the negative two's complement value to its absolute value, which can easily be done bitwise.
CAUTION: If a positive number is big enough to set the highest bit, it can no longer be distinguished from a negative two's complement value. That is the reason why programming languages need separate types for this (e.g. in C, int and unsigned).
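To illustrate the caution, here is a small sketch of mine (not from the answer) showing the same bit pattern read as unsigned versus signed:

#include <stdio.h>

int main(void)
{
    unsigned char u = 0xFB;           /* bit pattern 1111 1011 */
    signed char   s = (signed char)u; /* same bits, signed interpretation */
    printf("%d %d\n", u, s);          /* 251 -5 on a two's-complement machine */
    return 0;
}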
Fun fact about 2's complement - and the reason it has become so widely used:
For addition you don't need to know if it is negative or positive. Just add as unsigned.
Subtraction is similar, but you have to negate the second operand (see below).
The only thing you might have to care about is overflow. For that you have to check whether the sign of the result can actually result from the signs of the two inputs, and whether there was an addition overflow from the second-most into the most significant bit.
Negation of int n is simply done by 0 - n. Alternatively, you can invert all bits and add 1 - that's what a CPU or hardware subtracter basically does.
Note that 2's complement binaries have an asymmetric range: -(N+1) ... N.
For conversion, just check for the minimum value (it must be treated separately) and output it directly; otherwise get the sign (if (n < 0)), negate the value (n = -n), and finally convert the - then unsigned/positive - value to a string or character stream.
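A hedged sketch of that recipe (names are mine, not from the answer; the minimum value is handled here with the -(n + 1) + 1 trick rather than a literal special case):

#include <stdio.h>
#include <limits.h>

/* Convert a two's-complement int to a decimal string by hand. */
static void int_to_decimal(int n, char *out)
{
    char tmp[16];
    int i = 0, neg = 0;
    unsigned int u;

    if (n < 0) {                            /* highest bit set: negative */
        neg = 1;
        u = (unsigned int)(-(n + 1)) + 1u;  /* magnitude; avoids overflow for INT_MIN */
    } else {
        u = (unsigned int)n;
    }
    do {                                    /* emit digits, least significant first */
        tmp[i++] = (char)('0' + u % 10);
        u /= 10;
    } while (u);
    if (neg)
        tmp[i++] = '-';
    for (int j = 0; j < i; j++)             /* reverse into the output buffer */
        out[j] = tmp[i - 1 - j];
    out[i] = '\0';
}

int main(void)
{
    char buf[16];
    int_to_decimal(-5, buf);
    printf("%s\n", buf);      /* -5 */
    int_to_decimal(INT_MIN, buf);
    printf("%s\n", buf);      /* -2147483648 with a 32-bit int */
    return 0;
}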
I have this code to get from binary to decimal:
#include <stdio.h>
#include <math.h>
#include <stdint.h>
int main() {
printf("%lld\n", binaryToDecimal(11110000111100001111000011110000));
return 1;
}
long long binaryToDecimal(long long binary) {
int power = 0;
return binaryToDecimalHelper(binary, power);
}
long long binaryToDecimalHelper(long long binary, int power) {
if (binary != 0) {
long long i = binary % (double)10;
return (i * pow(2, power))
+ binaryToDecimalHelper(binary / 10, power + 1);
} else {
return 0;
}
}
It works fine for small values (up to 16 bits), but for 32 bits (which is what I need) it just returns garbage.
The number 11110000111100001111000011110000 is of type int, which can't hold a number as big as 11110000111100001111000011110000 in your machine. It's better to use a string representation instead ("11110000111100001111000011110000") and adjust your algorithm, if you can.
If you are limited to 32 bits maximum, this is one example:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void setStr(char *c, const char * x)
{
int i = 0;
while(x[i] != '\0')
{
c[i] = x[i];
i++;
}
}
void prepend(char* s, const char* t)
{
size_t len = strlen(t);
size_t i;
memmove(s + len, s, strlen(s) + 1);
for (i = 0; i < len; ++i)
{
s[i] = t[i];
}
}
int main(int argc, char const *argv[])
{
char *x = malloc(33*sizeof(char));
setStr(x, "111");
while (strlen(x) < 31) // not really necessary, but will help to 'cut' bytes if necessary
{
prepend(x,"0");
}
printf("%s\n", x);
int i = strtol(x,NULL,2);
printf("%d\n",i);
free(x);
return 0;
}
The first thing to be clear on is that your code does not convert anything to decimal, but rather to an int. Conversion to a decimal character string representation of that integer is performed by the printf() call.
The literal constant 11110000111100001111000011110000 is interpreted by the compiler (or would be if it were not so large) as a decimal value, and as such will require 104 bits to store;
i.e. log10(11110000111100001111000011110000) / log10(2)
Representing a binary value with a decimal integer containing only 1 and 0 digits does not make much mathematical sense - though it may be convenient for small integers. A 64 bit unsigned long long is good for only 20 decimal digits (using just 1 and 0 - it can represent all 19 digit positive decimal integers, and some 20 digit values);
i.e. log10(2) * 64
If you need longer binary values, then you should probably use a string representation. This is in fact simpler and more efficient in any case - you can use the fact that the machine representation of integers already is binary:
#include <stdio.h>
#include <stdint.h>
uint64_t binstrToInt( const char* binstr )
{
uint64_t result = 0 ;
int bit = 0;
while( binstr[bit] != '\0' )
{
if( binstr[bit] == '1' )
{
result |= 1 ;
}
bit++ ;
if( binstr[bit] != '\0' )
{
result <<= 1 ;
}
}
return result ;
}
int main()
{
printf("%llu\n", binstrToInt( "11110000111100001111000011110000" ) ) ;
return 0 ;
}
This would be easiest by far with a string as input instead of an int, and would allow longer numbers. Your problem is probably being caused by integer overflow.
String version:
#include <math.h>
#include <stdio.h>
#include <string.h>
int main() {
const char * const string_to_convert = "1010";
const int len = (int)strlen( string_to_convert );
int result = 0;
for( int i = len - 1; i >= 0; --i ) {
if( string_to_convert[i] == '1' ) {
// Careful with pow() -- returns double, may round incorrectly
result += (int)pow( 2.0, (double)(len - 1 - i) );
}
}
fprintf( stdout, "%d", result );
return 0;
}
Also, I'm not sure what the point of the return 1 is. Usually a non-zero return value from main indicates an error.
I don't understand why I am getting the error:
~/SecureSoftware$ gcc AddNumTest.c
AddNumTest.c:11:0: warning: "INT_MAX" redefined [enabled by default]
/usr/lib/gcc/x86_64-linux-gnu/4.6/include-fixed/limits.h:121:0: note: this is the location of the previous definition
I am looking for a way to keep my program from crashing when a big number is passed on the command line.
#include<stdio.h>
#include <stdlib.h>
#include <limits.h>
#define INT_MAX (2147483647)
#define INT_MIN (-2147483647)
int main(int argc, char** argv)
{
int i, TotalOfNumbers = 0;
for (i = 1; i < argc; i++)
{
TotalOfNumbers += atoi(argv[i]);
printf("Total of numbers entered = %d\n", TotalOfNumbers);
}
return 0;
}
Redefining INT_MIN and INT_MAX doesn't change the actual limits, it just makes the constants describing them inaccurate.
The limits are based on your platform's integer sizes / widths. To have genuinely different limits, you need to use different data types (for instance, long rather than int).
If a long isn't big enough, you may need to move up further, to a long long; note that these aren't specified prior to the C99 standard, so you need to have a reasonably modern compiler.
Changing your program to use long long instead of int would look like this:
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
int main(int argc, char** argv)
{
long long i, TotalOfNumbers = 0;
for (i = 1; i < argc; i++)
{
TotalOfNumbers += atoll(argv[i]);
printf("Total of numbers entered = %lld\n", TotalOfNumbers);
}
return 0;
}
Here is the correct way to convert text to integers in C: unlike atoi, strtol will actually tell you if the number is too large, although in a somewhat awkward fashion.
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
long
reliable_atol(const char *s)
{
char *endp;
long rv;
errno = 0;
rv = strtol(s, &endp, 10);
if (errno || endp == s || *endp) {
fprintf(stderr, "number malformed or out of range: %s\n", s);
exit(1);
}
return rv;
}
Use this function instead of atoi in your code, and change TotalOfNumbers to long (and print it with %ld).
You might also want to try to detect overflow in the addition, but you can do that without using INT_MAX or INT_MIN:
int main(int argc, char **argv)
{
int i;
long TotalOfNumbers = 0;
for (i = 1; i < argc; i++) {
long n = reliable_atol(argv[i]);
long sum = (long) ((unsigned long)TotalOfNumbers + (unsigned long)n);
if ((n > 0 && sum < TotalOfNumbers) || (n < 0 && sum > TotalOfNumbers)) {
fputs("numeric overflow while computing sum\n", stderr);
exit(2);
}
TotalOfNumbers = sum;
}
printf("total %ld\n", TotalOfNumbers);
return 0;
}
The casts are required, because signed integer overflow provokes undefined behavior but unsigned integer overflow doesn't. This code technically won't work on a non-twos-complement CPU but nobody has manufactured one of those in decades and the C standard's continued provision for the possibility is getting decidedly silly.
If I stick the above two code samples together, making a complete program, I get this for your test case:
$ gcc -O2 -Wall test.c # default mode for this CPU+OS: long is 64 bits
$ ./a.out 22220020202 45
total 22220020247
$ gcc -O2 -Wall -m32 test.c # alternative mode: long is 32 bits
$ ./a.out 22220020202 45
number malformed or out of range: 22220020202