In my C code, I want to calculate the factorial for numbers in the range 1 to 100. For small numbers the function works, but for bigger numbers (for example 100!) it returns an incorrect result. Is there any way to handle factorials of large numbers in C?
The compiler I'm using is gcc v4.3.3.
My code is as follows:
#include <stdio.h>
#include <math.h>
double print_solution(int);
int main(void)
{
int no_of_inputs, n;
int ctr = 1;
scanf("%d",&no_of_inputs); //Read no of inputs
do
{
scanf("%d",&n); //Read the input
printf("%.0f\n", print_solution(n));
ctr++;
} while(ctr <= no_of_inputs);
return 0;
}
double print_solution(int n)
{
if(n == 0 || n == 1)
return 1;
else
return n*print_solution(n-1);
}
No standard C data type will accurately handle numbers as large as 100!. Your only option is to use arbitrary-precision integer arithmetic, either through a library or implemented yourself.
If this is just some hobby project, I'd suggest trying it yourself. It's kind of a fun exercise. If this is work-related, use a pre-existing library.
The largest C data type you'll normally get is a 64-bit integer. 100! is on the order of 10^157, which takes at least 525 bits to store accurately as an integer.
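For a feel of where the built-in types give up, here is a small standalone check (not from the answer above, just an illustration): it multiplies up from 1! and stops as soon as the next product would no longer fit in an unsigned 64-bit integer.

#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned long long f = 1;
    for (unsigned n = 1; n <= 25; n++) {
        if (f > ULLONG_MAX / n) {   /* f * n would overflow */
            printf("%u! no longer fits in an unsigned 64-bit integer\n", n);
            break;
        }
        f *= n;
        printf("%2u! = %llu\n", n, f);
    }
    return 0;
}

With a 64-bit unsigned long long this stops at 21!, consistent with the limits quoted above.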
100 factorial is huge; to be precise it's 93326215443944152681699238856266700490715968264381621468592963895217599993229915608941463976156518286253697920827223758251185210916864000000000000000000000000.
Maybe you should use a bignum library like GMP. It has nice docs, a pretty consistent interface, and speed, and if you're on Linux your distro probably has a package (I think mine installs it by default).
To approximately compute factorials of large numbers you can go this way:
n! = n * (n-1)!
so log(n!) = log(n) + log((n-1)!)
Now you can use dynamic programming to compute log(n!) and calculate
n! as (base)^(log-value)
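A minimal sketch of that idea, using a plain running sum of log10 values rather than an explicit memo table; for 100! the approximation agrees with the exact value quoted above in its leading digits (remember to link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
    int n = 100;
    double log10_fact = 0.0;
    for (int i = 2; i <= n; i++)       /* log(n!) = log(2) + log(3) + ... + log(n) */
        log10_fact += log10((double)i);

    double exponent = floor(log10_fact);
    double mantissa = pow(10.0, log10_fact - exponent);
    printf("%d! is approximately %.6fe+%.0f (%.0f digits)\n",
           n, mantissa, exponent, exponent + 1.0);
    return 0;
}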
If you don't want to use a bigint library, the best you can do with the stdlib is using long double and tgammal() from math.h:
#include <math.h>

long double fact(unsigned n)
{
    return tgammal(n + 1);
}
This'll get you 100! with a precision of 18 decimal digits on x86 (i.e. an 80-bit long double).
An exact implementation isn't that complicated either:
#include <math.h>
#include <stdio.h>
#include <string.h>
void multd(char * s, size_t len, unsigned n)
{
unsigned values[len];
memset(values, 0, sizeof(unsigned) * len);
for(size_t i = len; i--; )
{
unsigned x = values[i] + (s[i] - '0') * n;
s[i] = '0' + x % 10;
if(i) values[i - 1] += x / 10;
}
}
void factd(char * s, size_t len, unsigned n)
{
memset(s, '0', len - 1);
s[len - 1] = '1';
for(; n > 1; --n) multd(s, len, n);
}
int main(void)
{
unsigned n = 100;
size_t len = ceill(log10l(tgammal(n + 1)));
char dstr[len + 1];
dstr[len] = 0;
factd(dstr, len, n);
puts(dstr);
}
Everyone is telling you the correct answer; however, a couple of further points.
Your initial idea to use a double to get a wider range does not work because a double cannot store this value precisely. It can do the calculations, but with a lot of rounding. This is why bigint libraries exist.
I know this is probably an example from a tutorial or examples site, but doing unbounded recursion will bite you at some point. You have a recursive solution for what is essentially an iterative process. You will understand why this site is named as it is when you try running your program with larger values (try 10000).
A simple iterative approach is as follows
int answer, idx;
for (answer = 1, idx = 1; idx <= no_of_inputs; idx++ ) {
answer = answer * idx;
}
printf("Factorial of %3d = %d\n", no_of_inputs, answer);
This is what I made to solve a Google riddle some years ago; it uses the GMP library (http://gmplib.org/):
#include <stdio.h>
#include "gmp.h"
void fact(mpz_t r,int n){
unsigned int i;
mpz_t temp;
mpz_init(temp);
mpz_set_ui(r,1);
for(i=1;i<=n;i++){
mpz_set_ui(temp,i);
mpz_mul(r,r,temp);
}
mpz_clear(temp);
}
int main(void) {
mpz_t r;
mpz_init(r);
fact(r,188315);
/* fact(r,100); */
gmp_printf("%Zd\n",r);
mpz_clear(r);
return(0);
}
gcc -o fact fact.c -lgmp
./fact
You could try going for the "unsigned long long" type, but that's the maximum you can get with built-in types.
I'd suggest (as cletus has already mentioned) either going with a known implementation of big numbers, or writing one yourself. "It's a nice exercise" x 2.
If you want to use only the standard data types and you do not need the exact answer, then compute the logarithm of n! instead of n! itself. The logarithm of n! fits easily in a double (unless n is huge).
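A small sketch of that suggestion using lgamma() from <math.h> (C99), which returns the natural logarithm of the gamma function, so lgamma(n + 1) is ln(n!):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double n = 100.0;
    double log10_fact = lgamma(n + 1.0) / log(10.0);   /* convert ln(n!) to log10(n!) */
    printf("log10(%.0f!) is about %f\n", n, log10_fact);
    printf("so %.0f! has %.0f decimal digits\n", n, floor(log10_fact) + 1.0);
    return 0;
}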
Any ways to handle factorial of large numbers in C ?
As factorials can rapidly exceed the range of standard fixed-width integers and even floating-point types like double, code should consider a user-defined type that allows unbounded integer precision for an exact answer.
Various wide integer precision libraries exist, yet if code needs a simple solution, consider using a string.
The below is not fast, nor mindful of array bounds, yet it illustrates the idea. The repeated conversion between '0'-'9' and 0-9 is wasteful, yet it allows easy step-by-step debugging.
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
static char *strfact_mult(char *s, unsigned x) {
unsigned sum = 0;
size_t len = strlen(s);
size_t i = len;
while (i > 0) {
sum += (s[--i] - '0') * x;
s[i] = sum % 10 + '0';
sum /= 10;
}
while (sum) {
len++;
memmove(&s[1], s, len);
s[i] = sum % 10 + '0';
sum /= 10;
}
return s;
}
char *str_fact(char *dest, unsigned n) {
strcpy(dest, "1");
while (n > 1) {
strfact_mult(dest, n--);
}
return dest;
}
void test_fact(unsigned n) {
char s[1000];
printf("%3u %s\n", n, str_fact(s, n));
}
int main(void) {
test_fact(0);
test_fact(4);
test_fact(54);
test_fact(100);
}
Output
0 1
4 24
54 230843697339241380472092742683027581083278564571807941132288000000000000
100 93326215443944152681699238856266700490715968264381621468592963895217599993229915608941463976156518286253697920827223758251185210916864000000000000000000000000
Here is a solution for your question:
#include <stdio.h>
void factorial(int b){
int temp = 0, r, size = 0, x;
int arr[200] = {0};
int l_b = b-1;
while(b>0){
r = b%10;
arr[size++] = r;
b = b/10;
}
while(l_b >= 2){
int i=0;
while(size>0){
x = arr[i]*l_b+temp ;
arr[i++] = x%10;
temp = x/10;
size--;
}
while(temp>0){
arr[i++] = temp%10;
temp = temp/10;
}
size = i; --l_b;
}
for(int k=size-1;k>=0;k--)
printf("%d",arr[k]);//ok i'm taking space here
printf("\n");
}
int main(void) {
// your code goes here
int fact;
scanf("%d\n",&fact);
factorial(fact);
return 0;
}
Factorials up to 12! fit into a 32-bit integer. Factorials up to 20! fit into a 64-bit integer. After that, you've run out of bits on most machines. However, 34! fits into an unsigned 128-bit integer, 57! fits into a 256-bit integer, and 98! fits into an unsigned 512-bit integer. To calculate 100! as an integer, you need at least 525 bits.
This bc script calculates factorials (up to 35! but you can change the limit easily):
#!/usr/bin/bc -l
define f(n) {
auto r, i
r = 1
for (i = 1; i <= n; i++)
{
r *= i;
print "n = ", i, ", log2 = ", l(r)/l(2), ", n! = ", r, "\n"
}
}
f(35)
quit
And some sample values:
# Key values
# n = 1, log2 = 0.00000000000000000000, n! = 1
# n = 2, log2 = 1.00000000000000000000, n! = 2
# n = 3, log2 = 2.58496250072115618147, n! = 6
# n = 4, log2 = 4.58496250072115618149, n! = 24
# n = 5, log2 = 6.90689059560851852938, n! = 120
# n = 6, log2 = 9.49185309632967471087, n! = 720
# n = 7, log2 = 12.29920801838727881834, n! = 5040
# n = 8, log2 = 15.29920801838727881836, n! = 40320
# n = 9, log2 = 18.46913301982959118130, n! = 362880
# n = 10, log2 = 21.79106111471695352921, n! = 3628800
# n = 11, log2 = 25.25049273335425078544, n! = 39916800
# n = 12, log2 = 28.83545523407540696694, n! = 479001600
# n = 13, log2 = 32.53589495221649912738, n! = 6227020800
# n = 14, log2 = 36.34324987427410323486, n! = 87178291200
# n = 15, log2 = 40.25014046988262176421, n! = 1307674368000
# n = 16, log2 = 44.25014046988262176426, n! = 20922789888000
# n = 17, log2 = 48.33760331113296117256, n! = 355687428096000
# n = 18, log2 = 52.50752831257527353551, n! = 6402373705728000
# n = 19, log2 = 56.75545582601885902935, n! = 121645100408832000
# n = 20, log2 = 61.07738392090622137726, n! = 2432902008176640000
# n = 21, log2 = 65.46970134368498166621, n! = 51090942171709440000
# ...
# n = 34, log2 = 127.79512061296909618950, n! = 295232799039604140847618609643520000000
# n = 35, log2 = 132.92440362991406264487, n! = 10333147966386144929666651337523200000000
# ...
# n = 57, log2 = 254.48541573017643505939
# n = 58, log2 = 260.34339672530400718017
# ...
# n = 98, log2 = 511.49178048020535201128
# n = 99, log2 = 518.12113710028496163045
# n = 100, log2 = 524.76499329005968632625
For the factorials 57!, 58!, 98!, 99!, 100! I've omitted the factorial value as it spreads over multiple lines in the output and isn't all that important. Note that 100! requires at least 525 bits of precision.
This code is available in my SOQ (Stack Overflow Questions) repository on GitHub as file factorial.bc in the src/miscellany sub-directory.
You could use double or long double to extend the range of values, but you lose some accuracy.
This is most certainly due to overflow. You need a way to represent large numbers (unsigned long long won't even cover up to 21!).
I guess that's because you are overflowing the int range, which goes up to approximately 2 billion. You can get up to about 4 billion if you use unsigned int, but beyond that you have to use a bigint library.
100! = 93326215443944152681699238856266700490715968264381621468592963895217599993229915608941463976156518286253697920827223758251185210916864000000000000000000000000
You can't represent a number this big with an int or a long.
In addition to the advice of others, I'd suggest getting familiar with the storage limits of the basic types (int, long, long long, ...) for whatever computer/platform you're actually using. ("When in doubt, print more out!")
One earlier poster referred to an 80-bit precision limit, but that's particular to an x86 CPU.
Another person cited ISO C90 several times, although C99 is the latest standard; even if many compilers haven't completely implemented C99, you will most likely find that they at least support long long, which should correspond to >= 64-bit precision.
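In that spirit ("when in doubt, print more out"), here is a quick standalone check of the integer limits from <limits.h> on the platform at hand:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    printf("INT_MAX    = %d\n", INT_MAX);
    printf("UINT_MAX   = %u\n", UINT_MAX);
    printf("LONG_MAX   = %ld\n", LONG_MAX);
    printf("LLONG_MAX  = %lld\n", LLONG_MAX);
    printf("ULLONG_MAX = %llu\n", ULLONG_MAX);
    return 0;
}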
Don't use the recursive algorithm, I think; it is slow, and even with caching it will be slow. This is just something you should consider.
The reason is that the function is recomputed from scratch for every input: if you ask for fact(1) through fact(100), the function body ends up running 1 + 2 + ... + 100 = 5050 times rather than roughly 100. Even with caching, a chain of function calls with if statements is still slower than running a plain loop.
double print_solution(int n)
{
double rval = 1;
unsigned int i;
for( i = 1; i <= n; i++ ) {
rval *= i;
}
return rval;
}
Using arbitrary-precision arithmetic you could make it go very high; however, you need to use a library to do that, or you could write your own, but that would take a lot of time.
Related
This is the function to calculate the factorial of very large numbers. It works perfectly fine, but the time complexity is quite high. How can I reduce the time complexity?
This function is called once.
Current time to find the factorial of 1 million is 40,000 ms;
Expected time: 10,000 ms
/* Globals assumed by this snippet (not shown in the original post):
   LEN must be big enough for all digits of n! -- 1,000,000! has about
   5.6 million decimal digits -- and factorial[] holds one digit per element. */
#define LEN 5600000
static unsigned int factorial[LEN];
static unsigned int len;

static void calcfactorial(unsigned int n)
{
unsigned int carry, i, j;
len = factorial[0] = 1;
for (i = 1; i < LEN; i++)
factorial[i] = 0;
for (i = 2; i <= n; i++)
{
carry = 0;
for (j = 0; j < len; j++)
{
factorial[j] = factorial[j] * i + carry;
carry = factorial[j] / 10;
factorial[j] = factorial[j] % 10;
}
while (carry)
{
factorial[len++] = carry % 10;
carry = carry / 10;
}
}
}
Since you only need a four-fold improvement in time, the following may suffice:
Use a wider (unsigned) integer type, such as uint64_t.
Instead of calculating in base ten, use the largest power of ten, B, such that B•N fits in the integer type¹, where N is the number you are computing the factorial of. For example, for 64-bit integers and 1,000,000!, you could use base 10^13.
When doing multiplications, do not multiply by every digit in the product array, as the loop for (j = 0; j < len; j++) does. All digits beyond the first start as zero, and they slowly become non-zero as work progresses. Track the highest non-zero digit, and do multiplications only up to that digit, until the product carries into the next digit.
Similarly, the low digits become zero as the work progresses, due to accumulating factors of the base in the factorial. Track the lowest non-zero digit, and start work there.
A program demonstrating these is below.
A significant cost in this program is the divisions by the base. If you switch to a power-of-two base, these become bitwise operations (shifts for division and bitwise AND operations for remainders), which are much cheaper. This should speed up computing the factorial considerably. However, the final product will have to be converted to decimal for output. That will have a lower cost than computing entirely in decimal, so it is a win.
After that, you might consider this answer in Computer Science Stack Exchange. It suggests restructuring the factorial as powers of primes and using repeated squaring to compute the powers of primes, which are then multiplied.
This answer suggests using n! ≈ sqrt(2πn)•(n/e)^n, which would require more sophisticated mathematics and programming.
Footnote
¹ The purpose of using a power of ten is that the result can then be printed directly from its base-B digits.
Demonstration
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
/* Define T to be an unsigned integer type. The larger the better up to the
widest type supported efficiently (generally the widest type for which the
processor has arithmetic instructions).
Define PRIuT to be a printf conversion specifier to print a type T as an
unsigned decimal numeral.
*/
typedef uint64_t T;
#define PRIuT PRIu64
// Return the base-10 logarithm of x, rounded down.
static unsigned ilog10(T x)
{
unsigned log = 0;
for (; 10 <= x; x /= 10)
++log;
return log;
}
// Return 10**x.
static T iexp10(unsigned x)
{
T power = 1;
for (; 0 < x; --x)
power *= 10;
return power;
}
int main(void)
{
// Set the value we want the factorial of.
static const T N = 1000000;
// Set the maximum value of T by using wrapping.
static const T MaximumT = -1;
/* Determine the maximum number of decimal digits we can use easily:
Given a number with Digits decimal digits, Digits*N will be
representable in a T.
*/
unsigned Digits = ilog10(MaximumT/N);
/* Set Base to 10**Digits. This is the numerical base we will do
arithmetic in -- like base ten, but more efficient if it is bigger.
*/
T Base = iexp10(Digits);
/* Set an array size that is sufficient to contain N!
For 1 < N, N! < N**N, so the number of digits in N! is less than
log10(N**N) = N * log(10). Since we are using ilog10, which rounds
down, we add 1 to it to round up, ensuring we have enough room.
Then we divide that number of digits by the number of digits we will
have in each array element (and round up, by subtracting one before the
division and adding one after), and that is the number of array
elements we allocate.
*/
size_t S = (N * (ilog10(N)+1) - 1) / Digits + 1;
T *Product = malloc(S * sizeof *Product);
if (!Product)
{
fprintf(stderr,
"Error, unable to allocate %zu bytes.\n", S * sizeof *Product);
exit(EXIT_FAILURE);
}
/* Initialize the array to 1. L and H remember the index of the lowest
and highest non-zero array element, respectively. Since all the
elements before L or after H are zero, we do not need to use them in
the multiplication.
*/
Product[0] = 1;
size_t L = 0, H = 0;
// Multiply the product by the numbers from 2 to N.
for (T i = 2; i <= N; ++i)
{
// Start with no carry.
T carry = 0;
/* Multiply each significant base-Base digit by i, add the carry in,
and separate the carry out. We start separately with the lowest
non-zero element so we can track if it becomes zero.
*/
while (1)
{
T t = Product[L] * i + carry;
carry = t / Base;
if ((Product[L] = t % Base)) // Leave when digit is non-zero.
break;
++L; // If digit is zero, increase L.
}
for (size_t j = L+1; j <= H; ++j)
{
T t = Product[j] * i + carry;
carry = t / Base;
Product[j] = t % Base;
}
// If there is a final carry out, put it in a new significant digit.
if (0 != carry)
Product[++H] = carry;
}
/* Print the result. The first base-Base digit is printed with no
leading zeros. All subsequent base-Base digits are printed with
leading zeros as needed to ensure exactly Digit decimal digits are
printed.
*/
printf("%" PRIuT, Product[H]);
for (size_t j = H; 0 < j--;)
printf("%0*" PRIuT, Digits, Product[j]);
printf("\n");
free(Product);
}
The goal of this program is to find the smallest number that can be divided by the numbers 1 to 20 without any remainders. The code is working but it takes 33 seconds. Can I improve it so that it can be faster? How?
#include <stdio.h>
int main(){
int number = 19, i, k;
label:
number++;
k = 0;
for (i = 1; i <= 20; i++){
if (number%i == 0){
k++;
}
}
if (k != 20){
goto label;
}
printf("%d\n", number);
return 0;
}
#include <stdio.h>
/* GCD returns the greatest common divisor of a and b (which must be non-zero).
This algorithm comes from Euclid, Elements, circa 300 BCE, book (chapter)
VII, propositions 1 and 2.
*/
static unsigned GCD(unsigned a, unsigned b)
{
while (0 < b)
{
unsigned c = a % b;
a = b;
b = c;
}
return a;
}
int main(void)
{
static const unsigned Limit = 20;
unsigned LCM = 1;
/* Update LCM to the least common multiple of the LCM so far and the next
i. The least common multiple is obtained by multiplying the numbers
and removing the duplicated common factors by dividing by the GCD.
*/
for (unsigned i = 1; i <= Limit; ++i)
LCM *= i / GCD(LCM, i);
printf("The least common multiple of numbers from 1 to %u is %u.\n",
Limit, LCM);
}
Change
int number = 19 ;
to
int number = 0 ;
then:
number++;
to
number += 20 ;
is an obvious improvement that will have a significant impact even if it is still a somewhat naive brute force approach.
At onlinegdb.com your algorithm took 102 seconds to run, whereas this change runs in less than one second and produces the same answer.
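For reference, a minimal sketch of the loop with both changes applied (starting at 0 and stepping by 20, since the answer must itself be divisible by 20):

#include <stdio.h>

int main(void)
{
    int number = 0;
    int divisors;
    do {
        number += 20;                    /* the answer is a multiple of 20 */
        divisors = 0;
        for (int i = 1; i <= 20; i++)
            if (number % i == 0)
                divisors++;
    } while (divisors != 20);
    printf("%d\n", number);
    return 0;
}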
The initial product of primes value suggested in a comment will provide a further improvement.
You need to multiply together the prime factors needed to cover every number from 1 to N, omitting any factor that is already supplied by another. This translates to multiplying together all primes <= N, each raised to the highest power that is still <= N.
const unsigned primes[] = {
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47
};
unsigned long long answer(unsigned n){ //for your example n=20
if (n>46) return 0; //will overflow 64 bit unsigned long long
unsigned long long tmp, ret = 1;
for (unsigned i = 0; primes[i]<=n;++i){ //each prime less than n
tmp = primes[i];
while ((tmp*primes[i])<=n) //highest power less than n
tmp *= primes[i];
ret *= tmp;
}
return ret;
}
usage: printf("%llu", answer(20));
If my math/code is right this should be fast and cover numbers up to 46. If your compiler supports unsigned __int128 it can be modified to go up to 88.
Explanation:
TLDR version: all numbers are either prime or can be made by multiplying primes.
To get the least common multiple of a set of numbers you break each
number into it's prime multiples and multiply the highest number of
each prime together.
Primes less than 20:
2,3,5,7,11,13,17,19
Non primes under 20:
4 = 2*2
6 = 2*3
8 = 2*2*2
9 = 3*3
10 = 2*5
12 = 2*2*3
14 = 2*7
15 = 3*5
16 = 2*2*2*2
18 = 2*3*3
20 = 2*2*5
From this we see that the maximum number of 2s is 4 and the maximum
number of 3s is 2.
2 to the 4th <= 20
3 squared <= 20
All powers >1 of the remaining
primes are greater than 20.
Therefore you get:
2*2*2*2*3*3*5*7*11*13*17*19
Which is what you would see if you watched the tmp variable in a
debugger.
Another reason this is faster is that it avoids modulus and division (they're expensive on a lot of systems).
Here's a way to do it without defining primes or divisions (except for a single sqrt), using a Sieve of Eratosthenes (circa 200 BCE).
I mark composites with 1, and primes^x with -1. Then I just loop over the array of numbers from sqrt(n) to n and pull out the remaining primes and maximum prime powers.
#include <stdio.h>
#include <math.h>
#define n 20
int main()
{
int prime [100]={0};
int rootN = sqrt(n);
unsigned long long inc,oldInc;
int i;
for (i=2; i<rootN; i++)
{
if (prime[i] == 1) continue;
//Classic Sieve
inc = i*i;
while (inc < n)
{
prime[inc] = 1;
inc += i;
}
//Max power of prime
oldInc = i;
inc = i * i;
while (inc < n)
{
prime[inc] = 1;
oldInc=inc;
inc *= i;
}
prime[oldInc] = -1;
prime[i] = 1;
}
inc = 1;
for(i=rootN; i<n; i++)
{
if (prime[i] == 0 || prime[i] == -1)
{
inc = inc * i;
}
}
printf("%llu",inc);
return 0;
}
I'm supposed to find the last 10 digits of 2^n (0 <= n <= 100), where n is the input. I found a method to handle large numbers, but the program fails when n > 64. Any leads on how to go about this would be appreciated.
#include<stdio.h>
#include<math.h>
/* Iterative Function to calculate (x^y)%p in O(log y) */
int power(long long int x, long long int y, long long int p)
{
long long int res = 1; // Initialize result
x = x % p; // Update x if it is more than or
// equal to p
while (y > 0) {
// If y is odd, multiply x with result
if (y & 1)
res = (res * x) % p;
// y must be even now
y = y >> 1; // y = y/2
x = (x * x) % p;
}
return res;
}
// C function to print last 10 digits of a^b
void printLastDigits(long long int a,long long int b)
{
long long int temp = pow(10,10);
// Calling modular exponentiation
temp = power(a, b, temp);
if (temp)
printf("%d",temp);
}
int main()
{
long long int n;
scanf("%d",&n);
printLastDigits(2,n);
return 0;
}
You don't need to worry about the 'high' bits, since multiplication by 2 shifts them out of the range of the lower part of the product you're interested in. Just be sure you're using the unsigned long long type (of at least 64 bits) to hold the intermediate values, e.g.,
#include <inttypes.h>
#include <stdio.h>
void low_digits (unsigned int n)
{
unsigned long long base = 2, modulus = 10000000000ULL;
for (unsigned int i = 1; i <= n; i++)
{
fprintf(stdout, "2^%u mod 10^10 = %llu\n", i, base);
base = (base * 2) % modulus;
}
}
You can test 2^1000 with a bignum calculator:
10715086071862673209484250490600018105614048117055336074437503883703\
51051124936122493198378815695858127594672917553146825187145285692314\
04359845775746985748039345677748242309854210746050623711418779541821\
53046474983581941267398767559165543946077062914571196477686542167660\
429831652624386837205668069376
while n = 1000 above yields: 5668069376
Others have noted, that this is a naive method, and modular exponentiation is far more efficient for sufficiently large values of (n). Unfortunately, this is going to require products that exceed the range of an unsigned 64-bit value, so unless you're prepared to implement [hi64][lo64] multi-precision mul / mod operations, it's probably beyond the scope of your task.
Fortunately, later versions of gcc and clang do provide an extended 128 bit integral type:
#include <inttypes.h>
#include <stdio.h>
void low_digits (unsigned int n)
{
    unsigned long long base = 2, modulus = 10000000000ULL;
    unsigned int e = n; /* work on a copy so n is still available for the printout */
    __extension__ unsigned __int128 u = 1, w = base;
    while (e != 0)
    {
        if ((e & 0x1) != 0)
            u = (u * w) % modulus; /* (mul-reduce) */
        if ((e >>= 1) != 0)
            w = (w * w) % modulus; /* (sqr-reduce) */
    }
    base = (unsigned long long) u;
    fprintf(stdout, "2^%u mod 10^10 = %llu\n", n, base);
}
The following uses strings to perform the multiplication:
void lastdigits(char digits[11], int n)
{
int i, j, x, carry;
for (i=0; i<n;i++) {
for (j=9, carry=0; j>=0; j--) {
x= digits[j]-'0';
x *= 2;
x += carry;
if (x>9) {carry= 1; x -= 10;}
else carry= 0;
digits[j]= x+'0';
}
}
}
void test(void)
{
char digits[11];
strcpy(digits,"0000000001");
lastdigits(digits,10);
printf("%s\n",digits);
strcpy(digits,"0000000001");
lastdigits(digits,20);
printf("%s\n",digits);
strcpy(digits,"0000000001");
lastdigits(digits,100);
printf("%s\n",digits);
}
Output:
0000001024
0001048576
6703205376
Since the other answers you've received don't actually show what you're doing wrong:
x = (x * x) % p;
You're assuming that x * x still fits in long long int. But if x is 0x100000000 (4294967296, for 10 decimal digits) and long long int is 64 bits, then it will not fit.
Either:
You need a way to accurately multiply two arbitrary 10-digit numbers. The result may have 20 digits and may not fit in long long int or even unsigned long long int. This means you'd need to use some bigint library or implement something like that yourself.
Or:
You need to avoid multiplying multiple possibly-10-digit numbers.
The answer you've accepted opts for simple repeated multiplication by 2. This is sufficient for your problem now, but beware that this does significantly increase the complexity if you want to allow very large exponents.
Let's say you are finding the last digit of 2^n: you just need to consider the last digit and ignore every other digit.
1. 2*2 = 4
2. 4*2 = 8
3. 8*2 = 16 (ignore last-but-one digit i.e 1)
4. 6*2 = 12 (ignore last-but-one digit i.e 1)
5. 2*2 = 4
6. 4*2 = 8
7. 8*2 = 16 (ignore last-but-one digit i.e 1)
8. 6*2 = 12 (ignore last-but-one digit i.e 1)
9. 2*2 = 4
... n-1 iterations
To find the last 2 digits of 2^n, ignore all digits except last 2 digits.
1. 2*2 = 4
2. 4*2 = 8
3. 8*2 = 16
4. 16*2 = 32
5. 32*2 = 64
6. 64*2 = 128 (Consider last 2 digits)
7. 28*2 = 56
8. 56*2 = 112 (Consider last 2 digits)
9. 12*2 = 24
... n-1 iterations
Similarly, to find the last 10 digits of 2^n, consider just last 10 digits at each iteration and repeat it for n-1 iterations.
Note:
With this approach, the biggest number you'll get during the calculation can be of 11 digits ~ 10^11, while for a 64-bit machine the max value is ~ 2^64 = ~ 10^18
I need generate random 64-bit unsigned integers using C. I mean, the range should be 0 to 18446744073709551615. RAND_MAX is 1073741823.
I found some solutions in the links which might be possible duplicates, but the answers mostly concatenate some rand() results or do some incremental arithmetic operations. So the results are always 18 or 20 digits. I also want outcomes like 5, 11, 33387, not just 3771778641802345472.
By the way, I really don't have much experience with C, but any approach, code samples, or ideas would be beneficial.
Concerning "So results are always 18 digits or 20 digits."
See @Thomas' comment. If you generate random numbers long enough, code will create ones like 5, 11 and 33387. If code generates 1,000,000,000 numbers/second, it may still take days before a number below 100,000 shows up - and far longer for ones as small as 5 or 11 - as such values are so rare amongst all 64-bit numbers.
rand() simply returns random bits. A simplistic method pulls 1 bit at a time:
uint64_t rand_uint64_slow(void) {
uint64_t r = 0;
for (int i=0; i<64; i++) {
r = r*2 + rand()%2;
}
return r;
}
Assuming RAND_MAX is some power of 2 - 1, as in OP's case (1073741823 == 0x3FFFFFFF), take advantage of the fact that at least 15 bits are generated each time. The following code will call rand() 5 times - a tad wasteful. Instead, bits shifted out could be saved for the next random number, but that brings in other issues. Leave that for another day.
uint64_t rand_uint64(void) {
uint64_t r = 0;
for (int i=0; i<64; i += 15 /*30*/) {
r = r*((uint64_t)RAND_MAX + 1) + rand();
}
return r;
}
A portable loop count method avoids the 15 /*30*/ - But see 2020 edit below.
#if RAND_MAX/256 >= 0xFFFFFFFFFFFFFF
#define LOOP_COUNT 1
#elif RAND_MAX/256 >= 0xFFFFFF
#define LOOP_COUNT 2
#elif RAND_MAX/256 >= 0x3FFFF
#define LOOP_COUNT 3
#elif RAND_MAX/256 >= 0x1FF
#define LOOP_COUNT 4
#else
#define LOOP_COUNT 5
#endif
uint64_t rand_uint64(void) {
uint64_t r = 0;
for (int i=LOOP_COUNT; i > 0; i--) {
r = r*(RAND_MAX + (uint64_t)1) + rand();
}
return r;
}
The autocorrelation effects commented here are caused by a weak rand(). C does not specify a particular method of random number generation. The above relies on rand() - or whatever base random function employed - being good.
If rand() is sub-par, then code should use other generators. Yet one can still use this approach to build up larger random numbers.
[Edit 2020]
Hallvard B. Furuseth provides a nice way to determine the number of bits in RAND_MAX when it is a Mersenne number - a power of 2 minus 1.
#define IMAX_BITS(m) ((m)/((m)%255+1) / 255%255*8 + 7-86/((m)%255+12))
#define RAND_MAX_WIDTH IMAX_BITS(RAND_MAX)
_Static_assert((RAND_MAX & (RAND_MAX + 1u)) == 0, "RAND_MAX not a Mersenne number");
uint64_t rand64(void) {
uint64_t r = 0;
for (int i = 0; i < 64; i += RAND_MAX_WIDTH) {
r <<= RAND_MAX_WIDTH;
r ^= (unsigned) rand();
}
return r;
}
If you don't need cryptographically secure pseudo random numbers, I would suggest using MT19937-64. It is a 64 bit version of Mersenne Twister PRNG.
Please, do not combine rand() outputs and do not build upon other tricks. Use existing implementation:
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt64.html
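A sketch of how that might be used. The function names below (init_genrand64, genrand64_int64) and the header name are what the reference mt19937-64 code at the link above appears to provide, so double-check them against the copy you actually download:

#include <stdio.h>
#include "mt64.h"   /* header shipped with the reference mt19937-64.c (name may differ) */

int main(void)
{
    init_genrand64(12345ULL);                  /* seed the generator */
    for (int i = 0; i < 5; i++)
        printf("%llu\n", genrand64_int64());   /* full-range 64-bit value */
    return 0;
}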
Iff you have a sufficiently good source of random bytes (like, say, /dev/random or /dev/urandom on a Linux machine), you can simply consume 8 bytes from that source and concatenate them. If they are independent and uniformly distributed, you're set.
If you don't, you MAY get away with doing the same, but there are likely to be some artefacts in your pseudo-random generator that give a toe-hold for all sorts of hi-jinx.
Example code assuming we have an open binary FILE *source:
/* Implementation #1, slightly more elegant than looping yourself
   (the function is named random64bit here, since a C identifier
   cannot start with a digit) */
uint64_t random64bit(void)
{
    uint64_t rv;
    size_t count;
    do {
        count = fread(&rv, sizeof(rv), 1, source);
    } while (count != 1);
    return rv;
}
/* Implementation #2 */
uint64_t random64bit(void)
{
    uint64_t rv = 0;
    int c;
    for (size_t i = 0; i < sizeof(rv); i++) {
        do {
            c = fgetc(source);
        } while (c < 0);
        rv = (rv << 8) | (c & 0xff);
    }
    return rv;
}
If you replace "read random bytes from a randomness device" with "get bytes from a function call", all you have to do is to adjust the shifts in method #2.
You're vastly more likely to get a "number with many digits" than one with a "small number of digits" (of all the numbers between 0 and 2 ** 64, roughly 95% have 19 or more decimal digits), so really that is what you will mostly get.
If you are willing to use a repetitive pseudo random sequence and you can deal with a bunch of values that will never happen (like even numbers? ... don't use just the low bits), an LCG or MCG are simple solutions. Wikipedia: Linear congruential generator can get you started (there are several more types including the commonly used Wikipedia: Mersenne Twister). And this site can generate a couple prime numbers for the modulus and the multiplier below. (caveat: this sequence will be guessable and thus it is NOT secure)
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
uint64_t
mcg64(void)
{
static uint64_t i = 1;
return (i = (164603309694725029ull * i) % 14738995463583502973ull);
}
int
main(int ac, char * av[])
{
for (int i = 0; i < 10; i++)
printf("%016p\n", mcg64());
}
I have tried this code here and it seems to work fine there.
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
int main(){
srand(time(NULL));
int a = rand();
int b = rand();
int c = rand();
int d = rand();
long e = (long)a*b;
e = labs(e);
long f = (long)c*d;
f = labs(f);
long long answer = (long long)e*f;
printf("value %lld",answer);
return 0;
}
I ran a few iterations and got the following outputs:
value 1869044101095834648
value 2104046041914393000
value 1587782446298476296
value 604955295827516250
value 41152208336759610
value 57792837533816000
If you have a 32- or 16-bit random value, generate two or four randoms and combine them into one 64-bit value with << and |.
uint64_t rand_uint64(void) {
// Assuming rand() yields at least 30 random bits (RAND_MAX >= 2^30 - 1).
uint64_t r = rand();
r = r<<30 | rand();
r = r<<30 | rand();
return r;
}
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
unsigned long long int randomize(unsigned long long int uint_64);
int main(void)
{
srand(time(0));
unsigned long long int random_number = randomize(18446744073709551615ULL);
printf("%llu\n",random_number);
random_number = randomize(123);
printf("%llu\n",random_number);
return 0;
}
unsigned long long int randomize(unsigned long long int uint_64)
{
char buffer[100] , data[100] , tmp[2];
//convert llu to string,store in buffer
sprintf(buffer, "%llu", uint_64);
//store buffer length
size_t len = strlen(buffer);
//x : store converted char to int, rand_num : random number , index of data array
int x , rand_num , index = 0;
//condition that prevents the program from generating number that is bigger input value
bool Condition = 0;
//iterate over buffer array
for( int n = 0 ; n < len ; n++ )
{
//store the first character of buffer
tmp[0] = buffer[n];
tmp[1] = '\0';
//convert it to integer,store in x
x = atoi(tmp);
if( n == 0 )
{
//if first iteration,rand_num must be less than or equal to x
rand_num = rand() % ( x + 1 );
//if generated random number does not equal to x,condition is true
if( rand_num != x )
Condition = 1;
//convert character that corrosponds to integer to integer and store it in data array;increment index
data[index] = rand_num + '0';
index++;
}
//if not first iteration,do the following
else
{
if( Condition )
{
rand_num = rand() % ( 10 );
data[index] = rand_num + '0';
index++;
}
else
{
rand_num = rand() % ( x + 1 );
if( rand_num != x )
Condition = 1;
data[index] = rand_num + '0';
index++;
}
}
}
data[index] = '\0';
char *ptr ;
//convert the data array to unsigned long long int
unsigned long long int ret = _strtoui64(data,&ptr,10);
return ret;
}