#include <cs50.h>
#include <stdio.h>
#include <string.h>
// Multiply every other digit by 2, starting with the number’s second-to-last digit, and then add those products’ digits together.
// Add the sum to the sum of the digits that weren’t multiplied by 2.
// If the total’s last digit is 0 (or, put more formally, if the total modulo 10 is congruent to 0), the number is valid!
// 4003600000000014 - VISA TEST
// 123456789
int length(long n);
int main(void)
{
long ccnumber = get_long_long("Number: ");
long x = 10; //Gets the 2nd digit.
long y = 1; //gets the first.
long sum1 = 0; //Holds the doubled sum.
long sum2 = 0; //Holds the undoubled sum.
int n = 8;
while (n > 0)
{
long cc1 = ccnumber / y;
cc1 = cc1 % 10;
long cc2 = ccnumber / x;
cc2 = cc2 % 10;
y = y * 100;
x = x * 100;
// Times CC2 by 2.
cc2 = cc2 * 2;
// Check if double digit, then minus 9, so as to add the two digits together.
if (cc2 / 10 != 0)
{
cc2 = cc2 - 9;
}
sum1 = sum1 + cc2;
sum2 = sum2 + cc1;
}
}
I have tried checking if it is 0 before dividing by 10. I have tried some other checks with cc1 and cc2 but can't work it out. I am doing the CS50 course and trying to do the Credit problem set. This is the best I have done so far, but it has suddenly started throwing this error since I tried to do the next step of checking if the total % 10 == 0.
The problem is really the infinite loop. But that, by itself, doesn't result in the core dump. What happens is best illustrated by adding a print statement at the bottom of the loop to display the values of a few variables:
#include <cs50.h>
#include <stdio.h>
#include <string.h>
// Multiply every other digit by 2, starting with the number’s second-to-last digit, and then add those products’ digits together.
// Add the sum to the sum of the digits that weren’t multiplied by 2.
// If the total’s last digit is 0 (or, put more formally, if the total modulo 10 is congruent to 0), the number is valid!
// 4003600000000014 - VISA TEST
// 123456789
int length(long n);
int main(void)
{
long ccnumber = get_long_long("Number: ");
long x = 10; //Gets the 2nd digit.
long y = 1; //gets the first.
long sum1 = 0; //Holds the doubled sum.
long sum2 = 0; //Holds the undoubled sum.
int n = 8;
while (n > 0)
{
long cc1 = ccnumber / y;
cc1 = cc1 % 10;
long cc2 = ccnumber / x;
cc2 = cc2 % 10;
y = y * 100;
x = x * 100;
// Times CC2 by 2.
cc2 = cc2 * 2;
// Check if double digit, then minus 9, so as to add the two digits together.
if (cc2 / 10 != 0)
{
cc2 = cc2 - 9;
}
sum1 = sum1 + cc2;
sum2 = sum2 + cc1;
printf("%d, %ld, %ld, %ld, %ld\n",n,cc2,cc1,x,y);
}
}
What you'll see is that x and y eventually overflow and wrap around to 0, which causes a division by 0, with the associated floating point error and core dump.
$ ./a.out
Number: 5
8, 0, 5, 1000, 100
8, 0, 0, 100000, 10000
8, 0, 0, 10000000, 1000000
8, 0, 0, 1000000000, 100000000
8, 0, 0, 100000000000, 10000000000
8, 0, 0, 10000000000000, 1000000000000
8, 0, 0, 1000000000000000, 100000000000000
8, 0, 0, 100000000000000000, 10000000000000000
8, 0, 0, -8446744073709551616, 1000000000000000000
8, 0, 0, 3875820019684212736, 7766279631452241920
8, 0, 0, 200376420520689664, 1864712049423024128
8, 0, 0, 1590897978359414784, 2003764205206896640
8, 0, 0, -6930898827444486144, -2537764290115403776
8, 0, 0, 7886392056514347008, 4477988020393345024
8, 0, 0, -4570789518076018688, 5076944270305263616
8, 0, 0, 4089650035136921600, -8814407033341083648
8, 0, 0, 3136633892082024448, 4003012203950112768
8, 0, 0, 68739955140067328, -5527149226598858752
8, 0, 0, 6873995514006732800, 687399551400673280
8, 0, 0, 4870020673419870208, -5047021154770878464
8, 0, 0, 7386721425538678784, -6640025486929952768
8, 0, 0, 802379605485813760, 80237960548581376
8, 0, 0, 6450984253743169536, 8023796054858137600
8, 0, 0, -537617205517352960, 9169610316303040512
8, 0, 0, 1578511669393358848, -5376172055173529600
8, 0, 0, -8169529724050079744, -2661627379775963136
8, 0, 0, -5296233161787703296, -7908320945662590976
8, 0, 0, 5332261958806667264, 2377900603251621888
8, 0, 0, -1729382256910270464, -2017612633061982208
8, 0, 0, -6917529027641081856, 1152921504606846976
8, 0, 0, -9223372036854775808, 4611686018427387904
8, 0, 0, 0, 0
Floating point exception (core dumped)
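The fix, once you see this, is straightforward: a credit card number has at most 16 digits, so 8 passes of the loop (two digits per pass) are enough, and n actually has to be decremented. Below is a minimal sketch of the corrected loop plus the final modulo-10 check, based only on the code above (the length and card-type checks the problem set also requires are left out):
    int n = 8;                        // at most 16 digits: 8 pairs
    while (n > 0)
    {
        long cc1 = (ccnumber / y) % 10;   // digit that is not doubled
        long cc2 = (ccnumber / x) % 10;   // digit to double
        y = y * 100;
        x = x * 100;
        cc2 = cc2 * 2;
        if (cc2 / 10 != 0)            // two-digit product: subtracting 9 adds its digits
        {
            cc2 = cc2 - 9;
        }
        sum1 = sum1 + cc2;
        sum2 = sum2 + cc1;
        n--;                          // without this the loop never ends and x/y overflow to 0
    }
    if ((sum1 + sum2) % 10 == 0)
    {
        printf("Checksum passes\n");
    }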
I'm searching for a fast way to permute the bits within a number in C.
I have an integer value and a table of permutation positions. Not all possible permutations are listed, just n!*2^n of the (2^n)! possible permutations.
For simplicity I will show the data as 4-bit numbers (n=2, lutsize = 8); in reality they are 32-bit numbers (n=5, lutsize = 3840):
static const uint8_t lut[lutsize][4] = {
{0, 1, 2, 3},
{1, 0, 3, 2},
{2, 3, 0, 1},
{3, 2, 1, 0},
{0, 2, 1, 3},
{1, 3, 0, 2},
{2, 0, 3, 1},
{3, 1, 2, 0}};
So the first entry is the identity; for the second entry bits 0&1 and bits 2&3 switch places, and so on. I wrote a function which calculates all the values and returns the minimal one:
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
enum { n = 2 };
enum { expn = 1<<n };
enum { lutsize = 8 };
static const uint8_t lut[lutsize][4] = {
{0, 1, 2, 3},
{1, 0, 3, 2},
{2, 3, 0, 1},
{3, 2, 1, 0},
{0, 2, 1, 3},
{1, 3, 0, 2},
{2, 0, 3, 1},
{3, 1, 2, 0}
};
uint32_t getMin(uint32_t in, uint32_t* ptr){
uint32_t pos, i, tmp, min;
uint8_t bits[expn];
tmp = in;
for (i = 0; i < expn; ++i){ // extract bits into array
bits[i] = tmp & 1;
tmp = tmp >> 1;
}
min = in;
for (pos = 0; pos < lutsize; ++pos){ // for each row in the permutation table
tmp = 0;
for (i = 0; i < expn; ++i){ // for each bit
tmp = tmp | (bits[i] << lut[pos][i]); // put bit i on position lut[pos][i]
}
ptr[pos] = tmp; // store result (optional)
//printf("%d: %d\n", pos, tmp);
if (tmp<min) { min = tmp; } // track minimum
}
//printf("min: %d\n", min);
return min;
}
int main(void) {
    uint32_t ptr[lutsize];
    printf("%u\n", (unsigned)getMin(5, ptr));
    assert(getMin(5, ptr) == 3);
    assert(getMin(0b0110, ptr) == 6);
    return 0;
}
With the printf calls uncommented, the example number 5 gives this output:
0: 5
1: 10
2: 5
3: 10
4: 3
5: 3
6: 12
7: 12
min: 3
I do calculate each permutation in a naive way by separating the bits into an array and then pushing them into their new position. I write the results into an array and keep track of the minimum which is returned at the end.
How do I improve the performance of the function getMin for n=5?
I'm primarily interested in the minimum.
Additional input:
the permutation table can be read as "bit value comes from position" or "bit value shifts to position". The results are the same, just the order of the entries differs.
it is not necessary (but a bonus) to use the array (ptr)
the goal is to reduce the time to compute the smallest bit-permutation number for all 32-bit numbers in random order. Precalculating all possible minimum numbers is ok. Using multiple versions of the lut is ok. Anything is ok as long as it does not take more than 4 GB of RAM, like storing all possible input/output pairs would do.
Sorry for the not-so-C-like code; I'm out of practice.
Here is the Python code to generate the lookup table for n=5:
from itertools import permutations
import numpy as np
import math

for n in [5]:
    expn = 1<<n  # 2:4; 3:8; 4:16; 5:32
    order = np.zeros((int(math.factorial(n))*expn, expn), dtype=np.int8)  # n!*2^n x 2^n matrix
    cnt = 0  # current entry
    permneg = np.zeros(expn, dtype=int)
    for perm in permutations(list(range(n))):  # shuffle order
        ini = np.array(list(range(expn)), dtype=int)  # list 0 to expn-1
        out = np.zeros(expn, dtype=int)
        for i in range(n):  # apply permutation on position level
            tmp = ini & 1  # get value of the least bit
            out += tmp << perm[i]  # put it on the right position
            ini = ini >> 1  # next bit becomes the least bit
        for neg in range(expn):  # include 0/1 swaps (negation)
            for i in range(len(out)):  # for all 2^n negation cases
                permneg[i] = out[i] ^ neg  # negate on the negation bits
            order[cnt] = permneg  # store
            cnt += 1  # increase counter
    print("enum { lutsize = " + str(len(order)) + " };")
    s = '},\n{'.join(', '.join(map(str, sl)) for sl in order)
    print("static const uint8_t lut[lutsize][" + str(expn) + "] = {\n{" + s + "}\n};")
I would suggest two optimizations: 1) store the value of the set bit in the table instead; 2) don't store each bit in an array but compute it on the fly. The result is the same, but it's probably a little bit faster.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define lutsize 8
static const uint8_t lutn[lutsize][4] = {
{1, 2, 4, 8},
{2, 1, 8, 4},
{4, 8, 1, 2},
{8, 4, 2, 1},
{1, 4, 2, 8},
{2, 8, 1, 4},
{4, 1, 8, 2},
{8, 2, 4, 1}};
uint32_t getMin(uint32_t in, uint32_t* ptr)
{
    uint32_t pos, i, tmp, min;

    min = in;
    for (pos = 0; pos < lutsize; ++pos) {
        tmp = 0;
        for (i = 0; i < 4; ++i) {   // 4 bits in this toy example
            // If the bit is set
            if (in & (1u << i))
                // OR in the value of the permuted bit from the table
                tmp = tmp | lutn[pos][i];
        }
        ptr[pos] = tmp;
        //printf("%d: %d\n", pos, tmp);
        if (tmp < min) { min = tmp; }   // track minimum
    }
    //printf("min: %d\n", min);
    return min;
}

int main(void) {
    uint32_t *ptr = malloc(lutsize * sizeof(uint32_t));

    printf("%u\n", getMin(5, ptr));   // prints 3
    free(ptr);
    return 0;
}
It seems to me that, not having all the possible permutations, you need to check all of those you have. Calculating the best permutation is not fruitful if you aren't sure that permutation is available.
So, the two remaining optimizations you can hope for are:
Stop the search if you achieve a minimum.
For example, if I have understood, you have the binary number 11001101. What is the minimum permutation value? Obviously 00011111, with all the ones to the right. Exactly which permutation does this for you - whether the last 1 was originally the first, second, fifth, sixth or eighth bit - does not matter to you.
As soon as a permutation hits the magic value, that is the minimum and you know you can do no better, so you can just as well stop searching.
In your own example you hit the minimum value 3 (or 0011, which is the minimum for 0101, i.e. 5) at iteration 4. You gained nothing from iterations five to seven.
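As a sketch of that idea applied to the question's getMin (reusing its lut, lutsize and expn; __builtin_popcount is a GCC/Clang builtin, not standard C, so treat it as an assumption), the lowest value any bit permutation could possibly produce packs all the set bits into the low positions, and the loop can stop the moment it reaches that bound:
uint32_t getMinEarly(uint32_t in, uint32_t *ptr)
{
    unsigned k = __builtin_popcount(in);
    // Smallest value any permutation of the bits could reach: all set bits packed low.
    uint32_t best_possible = (k == 32) ? 0xFFFFFFFFu : (1u << k) - 1u;
    uint32_t min = in;
    for (uint32_t pos = 0; pos < lutsize; ++pos) {
        uint32_t tmp = 0;
        for (uint32_t i = 0; i < expn; ++i)
            tmp |= ((in >> i) & 1u) << lut[pos][i];
        ptr[pos] = tmp;
        if (tmp < min)
            min = tmp;
        if (min == best_possible)   // cannot do better, stop searching
            break;
    }
    return min;
}
Note that on an early exit ptr is only partially filled, which is fine here since the asker said the array is optional.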
If you have 1101, go from { 0, 1, 3, 2 } to the bit-permuted value "1110" as fast as possible.
Not sure about the performance, but you could try precalculating the values from the table, even if this requires a 32x32 = 1024-entry table of 32-bit values - 4 KB of memory.
Considering the first position (our number is 1101), the permutation can have in position 0 only these values: 0, 1, 2, or 3. These have a value of 1___, 1___, 0___ or 1___. So our position 0 table is { 1000, 1000, 0000, 1000 }.
The second position is identical: { 1_, 1_, 0_, 1_ } and so we have { 0100, 0100, 0000, 0100 }.
The next two rows are { 0010, 0010, 0000, 0010 }
{ 0001, 0001, 0000, 0001 }.
Now, you want to calculate the value if permutation { 3, 0, 1, 2 } is applied:
for (i = 0; i < 4; i++) {
value |= table[i][permutation[i]];
}
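For the 32-bit case, a sketch of building and using such a per-position table could look like the following (it keeps the question's convention where index i means bit i, counted from the least significant bit, and the "bit value comes from position" reading of a permutation row; build_contrib and apply_perm are made-up names):
#include <stdint.h>

// contrib[dest][src] is the value contributed when source bit 'src' of 'in'
// lands at destination bit 'dest'. 32*32 entries of 4 bytes each = 4 KB.
static void build_contrib(uint32_t in, uint32_t contrib[32][32])
{
    for (int dest = 0; dest < 32; ++dest)
        for (int src = 0; src < 32; ++src)
            contrib[dest][src] = ((in >> src) & 1u) << dest;
}

// Applying one permutation row is then just 32 lookups OR-ed together.
static uint32_t apply_perm(uint32_t contrib[32][32], const uint8_t perm[32])
{
    uint32_t value = 0;
    for (int dest = 0; dest < 32; ++dest)
        value |= contrib[dest][perm[dest]];
    return value;
}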
===
But (again, if I understood the problem), let us say that the number has one fourth zeroes (it is made up of 24 1's, 8 0's) and the table is sizeable enough.
There is a very significant chance that you will very soon hit both a permutation that puts a 1 in the 31st bit, yielding at least some two billion, and another that puts a 0 there, yielding something smaller.
The minimum will then surely be below two billion, so from then on, as soon as you check the first permuted bit and find it is a 1, you can stop calculating: you won't get a better minimum out of that permutation. Skip immediately to the next.
If you do this check at every cycle instead of only the first,
for (i = 0; i < 4; i++) {
value |= table[i][permutation[i]];
if (value > minimum) {
break;
}
}
I fear the extra comparisons might eat up any gains, but on the other hand, since the table values come in descending order (the first positions carry the highest bits), most candidates would be eliminated on the first few bits, avoiding the subsequent checks. It might be worth benchmarking.
Nothing stops you from doing the calculation in two steps:
for (i = 0; i < 8; i++) {
value |= table[i][permutation[i]];
if (value > minimum) {
break;
}
}
if (value < minimum) {
for (; i < 32; i++) {
value |= table[i][permutation[i]];
}
}
I have an array of ints that represent a binary number. I need to convert it to its decimal integer equivalent. Can someone please help me?
int octet[8] = {0, 1, 0, 0, 1, 0, 1, 0};
I want to return the decimal equivalent which would be 74 in this case.
This is an example of how you could do something like that:
#include <stdio.h>
int main()
{
int octet[] = {0, 1, 0, 0, 1, 0, 1, 0};
int *p = octet;
int len = sizeof(octet) / sizeof(octet[0]); // 8
unsigned result = 0;
while(len--) result = (result<<1) | *p++;
/* 74 */
printf("%u\n", result);
}
2^0 x the last element + 2^1 x the second-to-last element + 2^2 x the third-to-last element + ...
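A direct transcription of that formula would look like this (a minimal sketch; the weight doubles as we move from the last element toward the first):
#include <stdio.h>

int main(void)
{
    const int octet[] = {0, 1, 0, 0, 1, 0, 1, 0};
    const int len = sizeof(octet) / sizeof(octet[0]);
    unsigned result = 0;
    unsigned weight = 1;                // 2^0 for the last element
    for (int i = len - 1; i >= 0; i--)
    {
        result += weight * octet[i];    // weight is 2^(len - 1 - i)
        weight *= 2;
    }
    printf("%u\n", result);             // prints 74
}
The program below instead shifts the accumulated value left and ORs in each bit starting from the most significant end, which computes the same result: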
#include <stdio.h>
int main()
{
// Make array constant and let the compiler tell us how many elements are contained.
// Doing it this way allows "octet" to grow, up to the number of bits in an int.
// It won't be an "octet" anymore though,...
const int octet[] = {0, 1, 0, 0, 1, 0, 1, 0};
const int NumArrayElements = sizeof(octet) / sizeof(*octet);
// Assign hi bit to save a loop iteration.
int value = octet[0];
// Run remaining bits, shifting up and or-ing in new values.
for(int i=1; i<NumArrayElements; i++)
{
value <<= 1;
value |= octet[i];
}
printf("%d\n", value);
}
This is a simple thing to do, but having the compiler handle some of the sizing issues and adding comments makes it much easier to maintain over time.
I have an array of 10 elements, and I need to make that array into its moving average equivalent.
Using 3 elements each time (e.g. the average of the elements at indices 0-2, then 1-3, and so on, wrapping around from index 9 back to indices 0 and 1 so that the new array also has exactly 10 elements).
What is the best approach to this without using pointers to wrap the array around (a ring buffer)?
Just do some bounds checking and wrap the index in code.
The example code below can be made more efficient, but it's written like this for clarity (sort of). Also there may be some minor mistake since I'm typing into StackOverflow rather than compiling it.
int array[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int averages[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

for (int i = 0; i < 10; i++)
{
    int a = i;
    int b = i + 1 > 9 ? i - 10 + 1 : i + 1;   // wrap past the end of the array
    int c = i + 2 > 9 ? i - 10 + 2 : i + 2;
    int sum = array[a] + array[b] + array[c];
    int average = sum / 3;   // note this will truncate any decimal part
    averages[i] = average;
}
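If you prefer, the same wrap-around can be written with the modulo operator instead of the explicit comparisons; a sketch under the same assumptions (10 elements, window of 3, integer division truncating):
int array[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int averages[10];

for (int i = 0; i < 10; i++)
{
    // % 10 wraps indices 10 and 11 back around to 0 and 1
    int sum = array[i] + array[(i + 1) % 10] + array[(i + 2) % 10];
    averages[i] = sum / 3;   // still truncates any decimal part
}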
Let's say I have an integer called 'score', that looks like this:
int score = 1529587;
Now what I want to do is get each digit 1, 5, 2, 9, 5, 8, 7 from the score using bitwise operators (see the edit note below).
I'm pretty sure this can be done since I've once used a similar method to extract the red green and blue values from a hexadecimal colour value.
How would I do this?
Edit
It doesn't necessarily have to be bitwise operators, I just thought it'd be simpler that way.
You use the modulo operator:
while(score)
{
printf("%d\n", score % 10);
score /= 10;
}
Note that this will give you the digits in reverse order (i.e. least significant digit first). If you want the most significant digit first, you'll have to store the digits in an array, then read them out in reverse order.
RGB values fall nicely on bit boundaries; decimal digits don't. I don't think there's an easy way to do this using bitwise operators at all. You'd need to use decimal operators like modulo 10 (% 10).
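If you do want the most significant digit first, one way (a minimal sketch, positive values assumed) is to buffer the digits and print them backwards:
#include <stdio.h>

int main(void)
{
    int score = 1529587;
    int digits[10];          // enough for any 32-bit int
    int n = 0;

    do                       // do/while so score == 0 still yields one digit
    {
        digits[n++] = score % 10;
        score /= 10;
    } while (score);

    while (n--)              // read back in reverse: most significant first
        printf("%d\n", digits[n]);
}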
Agree with the previous answers.
A little correction: there's a better way to print the decimal digits from left to right, without allocating an extra buffer. In addition, you may want to display a zero character if the score is 0 (the loop suggested in the previous answers won't print anything in that case).
This demands an additional pass:
int div;
for (div = 1; div <= score / 10; div *= 10)
    ;   /* div is now the largest power of 10 that is <= score */
do
{
    printf("%d\n", score / div);
    score %= div;
    div /= 10;
} while (div > 0);
Don't reinvent the wheel. C has sprintf for a reason.
Since your variable is called score, I'm guessing this is for a game where you're planning to use the individual digits of the score to display the numeral glyphs as images. In this case, sprintf has convenient format modifiers that will let you zero-pad, space-pad, etc. the score to a fixed width, which you may want to use.
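For instance (a sketch, assuming a non-negative score that fits in an int), a fixed-width, zero-padded score can be produced and then walked character by character:
#include <stdio.h>

int main(void)
{
    int score = 1529587;
    char buf[16];

    // "%07d" pads the score with leading zeros to a width of 7
    snprintf(buf, sizeof buf, "%07d", score);

    for (const char *p = buf; *p != '\0'; p++)
        printf("digit: %c (value %d)\n", *p, *p - '0');

    return 0;
}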
This solution gives correct results over the entire range [0,UINT_MAX]
without requiring digits to be buffered.
It also works for wider types or signed types (with positive values) with appropriate type changes.
This kind of approach is particularly useful on tiny environments (e.g. Arduino bootloader) because it doesn't end up pulling in all the printf() bloat (when printf() isn't used for demo output) and uses very little RAM. You can get a look at value just by blinking a single led :)
#include <limits.h>
#include <stdio.h>
int
main (void)
{
unsigned int score = 42; // Works for score in [0, UINT_MAX]
printf ("score via printf: %u\n", score); // For validation
printf ("score digit by digit: ");
unsigned int div = 1;
unsigned int digit_count = 1;
while ( div <= score / 10 ) {
digit_count++;
div *= 10;
}
while ( digit_count > 0 ) {
printf ("%d", score / div);
score %= div;
div /= 10;
digit_count--;
}
printf ("\n");
return 0;
}
Usually this problem is solved either by taking the number modulo 10 in a loop or by converting the number to a string. To convert a number to a string you could use the (non-standard) function itoa, so let's consider the variant that uses the modulo operation in a loop.
Content of a file get_digits.c
$ cat get_digits.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// return a length of integer
unsigned long int get_number_count_digits(long int number);
// get digits from an integer number into an array
int number_get_digits(long int number, int **digits, unsigned int *len);
// for demo features
void demo_number_get_digits(long int number);
int
main()
{
demo_number_get_digits(-9999999999999);
demo_number_get_digits(-10000000000);
demo_number_get_digits(-1000);
demo_number_get_digits(-9);
demo_number_get_digits(0);
demo_number_get_digits(9);
demo_number_get_digits(1000);
demo_number_get_digits(10000000000);
demo_number_get_digits(9999999999999);
return EXIT_SUCCESS;
}
unsigned long int
get_number_count_digits(long int number)
{
if (number < 0)
number = llabs(number);
else if (number == 0)
return 1;
if (number < 999999999999997)
return floor(log10(number)) + 1;
unsigned long int count = 0;
while (number > 0) {
++count;
number /= 10;
}
return count;
}
int
number_get_digits(long int number, int **digits, unsigned int *len)
{
number = labs(number);
// determine the count of digits, which is also the size of the array
*len = get_number_count_digits(number);
*digits = realloc(*digits, *len * sizeof(int));
// fill up the array
unsigned int index = 0;
if (number == 0)
    (*digits)[0] = 0;   // special case: the single digit 0
while (number > 0) {
    (*digits)[index] = (int)(number % 10);
    number /= 10;
    ++index;
}
// reverse the array
unsigned long int i = 0, half_len = (*len / 2);
int swap;
while (i < half_len) {
swap = (*digits)[i];
(*digits)[i] = (*digits)[*len - i - 1];
(*digits)[*len - i - 1] = swap;
++i;
}
return 0;
}
void
demo_number_get_digits(long int number)
{
int *digits;
unsigned int len;
digits = malloc(sizeof(int));
number_get_digits(number, &digits, &len);
printf("%ld --> [", number);
for (unsigned int i = 0; i < len; ++i) {
if (i == len - 1)
printf("%d", digits[i]);
else
printf("%d, ", digits[i]);
}
printf("]\n");
free(digits);
}
Demo with the GNU GCC
$~/Downloads/temp$ cc -Wall -Wextra -std=c11 -o run get_digits.c -lm
$~/Downloads/temp$ ./run
-9999999999999 --> [9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
-10000000000 --> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-1000 --> [1, 0, 0, 0]
-9 --> [9]
0 --> [0]
9 --> [9]
1000 --> [1, 0, 0, 0]
10000000000 --> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
9999999999999 --> [9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
Demo with the LLVM/Clang
$~/Downloads/temp$ rm run
$~/Downloads/temp$ clang -std=c11 -Wall -Wextra get_digits.c -o run -lm
setivolkylany$~/Downloads/temp$ ./run
-9999999999999 --> [9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
-10000000000 --> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-1000 --> [1, 0, 0, 0]
-9 --> [9]
0 --> [0]
9 --> [9]
1000 --> [1, 0, 0, 0]
10000000000 --> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
9999999999999 --> [9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
Testing environment
$~/Downloads/temp$ cc --version | head -n 1
cc (Debian 4.9.2-10) 4.9.2
$~/Downloads/temp$ clang --version
Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)
Target: x86_64-pc-linux-gnu
Thread model: posix
#include <stdio.h>
#include <stdlib.h>

int main() {
    int num;          // given integer
    int reminder;
    int rev = 0;      // to reverse the given integer
    int ndigits = 0;  // digit count, so trailing zeros are not lost
    int count = 1;

    printf("Enter the integer:");
    scanf("%i", &num);

    /* First while loop will reverse the number and count its digits */
    while (num != 0)
    {
        reminder = num % 10;
        rev = rev * 10 + reminder;
        num /= 10;
        ndigits++;
    }

    /* Second while loop will give the digits from left to right */
    while (ndigits != 0)
    {
        reminder = rev % 10;
        printf("The %d digit is %d\n", count, reminder);
        rev /= 10;
        count++;      // to give the digits from left to right
        ndigits--;
    }
    return (EXIT_SUCCESS);
}
First convert your integer to a string using sprintf, then do whatever you want with its elements, which are chars. Assuming an unsigned score:
unsigned int score = 1529587, i;
char stringScore [11] = { 0 };
sprintf( stringScore, "%u", score );
for( i=0; i<strlen(stringScore); i++ )
    printf( "%c\n", stringScore[i] );
Please note how:
It prints digits starting from the most significant one
stringScore is 11 characters long, assuming that the size of int on your platform is 4 bytes, so that the maximum integer is 10 digits long. The eleventh character is for the string terminator '\0'.
sprintf makes all the work for you
Do you need to have an integer for every single digit?
Since we are sure that stringScore contains only digits, the conversion is really easy. If dig is the character containing the digit, the corresponding integer can be obtained in this way:
int intDigit = dig - '0';
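Putting that together, converting the whole buffer into an array of ints could look like this (a small sketch reusing the stringScore from above; digitValues is a made-up name):
int digitValues[10];   // a 32-bit int has at most 10 decimal digits
int count = 0;

for (int i = 0; stringScore[i] != '\0'; i++)
{
    digitValues[count++] = stringScore[i] - '0';   // '0'..'9' map to 0..9
}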
// this can be easily understood by beginners
int score = 12344534;
int div;

for (div = 1; div <= score; div *= 10)
{
}
/* for (div = 1; div <= score; div *= 10); - a for loop with a semicolon (an empty body) is the same */

while (div > 1)
{
    div /= 10;
    printf("%d\n", score / div);
    score %= div;
}
I've made this solution. It's simple: instead of reading an integer, I read a string (a char array in C), then print its characters with a for loop. The code also prints the sum of the digits.
#include <stdio.h>
#include <string.h>

int main(void)
{
    char n[32];   // buffer for the digits read as text
    scanf("%31s", n);
    int total = 0;
    for (size_t i = 0; i < strlen(n); i++) {
        printf("%c", n[i]);
        total += n[i] - '0';   // '0' is 48 in ASCII
    }
    printf("\n%d\n", total);
}