Related
I tried to parse uint64_t array to an array of char (result in decimal, separated by comma).
I used memcpy, but every time I got random values. The itoa() function only handles values up to the uint32_t maximum. I also tried splitting each uint64_t into two uint32_t halves, but I never got the right result.
uint64_t numbers[10] = { 201234567890123456,
12345678901234567890,
98765432109876543,
65432109887,
12345234512345,
217631276371261627,
12354123512453124,
2163521442531,
2341232142132321,
1233432112 };
char array[1000] = "";
Expected result:
array = "201234567890123456,12345678901234567890,98765432109876543,65432109887,12345234512345,217631276371261627,12354123512453124,2163521442531,2341232142132321,1233432112"
I tried int64ToChar from this topic, but result is:
/* BUG (this is what the question is about): memcpy copies the raw
 * little-endian object bytes of n, not decimal digit characters, and
 * it copies 10 bytes from an 8-byte object — reading past the end of
 * n is undefined behavior.  The garbage output shown below is the
 * expected symptom. */
void uint64ToChar(char a[], int64_t n) {
memcpy(a, &n, 10);
}
uint64_t number = 12345678900987654;
char output[30] = "";
uint64ToChar(output, number);
Result:
�g]T�+
Thanks for any help.
Use snprintf() to convert the 64-bit numbers:
#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>
int main() {
    /* Values to print.  12345678901234567890 exceeds LLONG_MAX, so it
     * needs the ULL suffix to be a valid integer constant. */
    uint64_t numbers[10] = { 201234567890123456,
                             12345678901234567890ULL,
                             98765432109876543,
                             65432109887,
                             12345234512345,
                             217631276371261627, /* was 21763127637126371627, which does not fit in 64 bits */
                             12354123512453124,
                             2163521442531,
                             2341232142132321,
                             1233432112 };
    char array[1000];
    size_t i, n = sizeof(numbers) / sizeof(numbers[0]), pos = 0;
    /* snprintf returns the number of characters it wrote (excluding the
     * NUL), so pos always indexes the current end of the string.
     * PRIu64 is the portable conversion for uint64_t. */
    for (i = 0; i < n && pos < sizeof(array); i++) {
        pos += snprintf(array + pos, sizeof(array) - pos, "%"PRIu64"%s", numbers[i],
                        i < n - 1 ? "," : "");
    }
    printf("%s\n", array);
    return 0;
}
If all the data is available at compile-time, there's no obvious reason why you should use slow run-time conversion functions like s*printf. Simply do it all at pre-processor stage:
/* One token list, two uses: it initializes the array, and stringified
 * below it becomes the comma-separated string literal — all at
 * preprocessing time, no run-time conversion. */
#define INIT_LIST \
201234567890123456, \
12345678901234567890, \
98765432109876543, \
65432109887, \
12345234512345, \
217631276371261627, \
12354123512453124, \
2163521442531, \
2341232142132321, \
1233432112
/* Two-level stringify so INIT_LIST is macro-expanded before # applies */
#define STR_(...) #__VA_ARGS__
#define STR(x) STR_(x)
int main (void)
{
/* NOTE(review): 12345678901234567890 exceeds LLONG_MAX; as the answer
 * text below explains, it needs a ull suffix to compile cleanly. */
uint64_t numbers[10] = { INIT_LIST };
char array[] = STR(INIT_LIST);
puts(array);
}
More advanced alternatives with "X macros" are possible, if you want to micro-manage comma and space placement between numbers etc.
Please note that 12345678901234567890 is too large to be a valid signed integer constant on my 64 bit system, the max is 2^63 - 1 = 9.22*10^18 but this number is 12.34*10^18. I have to change it to 12345678901234567890ull to get this program to compile, since the maximum number is then 18.44*10^18.
You can accomplish this by using sprintf_s in a for loop.
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#define BUFSIZE 22 /* string buffer size to support a single 64 bit unsigned integer */
#define ARRSIZE 10 /* size of the unsigned integer array */
#define STRSIZE BUFSIZE * ARRSIZE /* max size of the string of unsigned integer array */
int main()
{
int i;
char num_str[STRSIZE] = ""; /* accumulated comma-separated output */
/* NOTE(review): 12345678901234567890 is larger than LLONG_MAX, so a
 * strictly conforming compiler wants a ULL suffix on it. */
uint64_t num_arr[ARRSIZE] = {
201234567890123456,
12345678901234567890,
98765432109876543,
65432109887,
12345234512345,
2176312763712637162,
12354123512453124,
2163521442531,
2341232142132321,
1233432112
};
/* NOTE(review): sprintf_s is C11 Annex K / MSVC-specific; elsewhere
 * use snprintf.  %llu assumes unsigned long long is 64 bits (true on
 * common platforms; PRIu64 is fully portable).  strlen on every
 * iteration makes the loop O(n^2) — fine at this size. */
for (i = 0; i < ARRSIZE; i++)
{
/* convert an unsigned integer to a string and concatenate to previous string every loop */
sprintf_s(num_str + strlen(num_str), BUFSIZE, "%llu,", num_arr[i]);
}
num_str[strlen(num_str) - 1] = 0; /* truncate the trailing comma */
printf("%s", num_str);
return 0;
}
This results in:
num_str = "201234567890123456,12345678901234567890,98765432109876543,65432109887,12345234512345,2176312763712637162,12354123512453124,2163521442531,2341232142132321,1233432112"
The memcpy function copies byte by byte from one location to another. What you're attempting to do is take 10 bytes of a 64 bit number (which only contains 8 bytes) and reinterpret each of them as an ASCII character. Not only will this not give the results you expect, but you also invoke undefined behavior by reading past the memory bounds of an object.
Guessing at what functions do is a bad way to learn C. You need to read the documentation for these functions (run "man function_name" on Linux for the function in question or search learn.microsoft.com for MSVC) to understand what they do.
If you use the sprintf function, you can convert a 64 bit integer to a string. The %llu conversion specifier accepts an unsigned long long, which is at least as big as a uint64_t.
sprintf(a, "%llu", (unsigned long long)n);
I want to use 128-bit unsigned integer in C. I have written the following code:
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include <stdint.h>
#include <limits.h>
#define unt __uint128_t
/* NOTE(review): this constant exceeds the range of every standard
 * integer type — C has no 128-bit integer constants — so the compiler
 * cannot represent it as written.  That is exactly the bug this
 * question is asking about (the answers below build the value from
 * two 64-bit halves instead). */
#define G1 226854911280625642308916404954512140970
/* Kernighan popcount: each n &= n-1 clears the lowest set bit, so the
 * loop runs once per set bit. */
int countSetBits(unt n){
int count = 0;
while(n){ n &= (n-1) ; count++; }
return count;
}
int main(){
printf(" %d\n",countSetBits(G1) );
}
Although output should be 64, number of bits of G1, it is coming 96. I use gcc compiler. I know GMP GNU, but for my purpose, I need fast execution. Hence I want to avoid GNU library.
Because of an issue explained here, you need to assign the constant using two 64 bit values:
#include <stdio.h>
#define uint128_t __uint128_t
#define G1 ((uint128_t)12297829382473034410 << 64 | (uint128_t)12297829382473034410)

/* Population count for a 128-bit value using Kernighan's trick:
 * every n &= n - 1 clears exactly one set bit, so the loop body
 * executes once per set bit. */
int countSetBits(uint128_t n) {
    int bits = 0;
    for (; n != 0; n &= n - 1)
        ++bits;
    return bits;
}
int main() {
/* prints " 64": each 64-bit half of G1 contributes 32 set bits */
printf(" %d\n",countSetBits(G1) );
}
Outputs:
64
Live version available in onlinegdb.
There are no 128-bit integer constants in the C language, so you need to use two 64-bit values and combine them:
#define unt __uint128_t
#define G1 ((((__uint128_t)0xaaaaaaaaaaaaaaaaull) << 64) + ((__uint128_t)0xaaaaaaaaaaaaaaaaull))

/* Population count via Kernighan's method: each iteration removes the
 * lowest set bit, so the cost is proportional to the number of set
 * bits rather than the word width. */
int countSetBits(unt n){
    int total = 0;
    for ( ; n; ++total)
        n &= n - 1;
    return total;
}
/* Population count by scanning every bit: shift right one position at
 * a time and accumulate the low bit.  O(width) instead of O(popcount),
 * but gives identical results to countSetBits above. */
int countSetBits1(unt n){
    int total = 0;
    for ( ; n; n >>= 1)
        total += (int)(n & 1);
    return total;
}
int main(){
/* both implementations agree: G1 (alternating 1010... bits) has 64 set bits */
printf(" %d\n",countSetBits(G1) );
printf(" %d\n",countSetBits1(G1) );
}
Since you're using one gcc extension, I assume more are okay. gcc has a family of intrinsic functions for returning the number of set bits in regular integer types. Depending on your CPU and gcc options, this will either become the appropriate instruction, or fall back to calling a library function.
Something like:
/* Count the set bits of a 128-bit value by splitting it into two
 * 64-bit halves and applying the gcc/clang popcount builtin to each.
 * Splitting with shifts (rather than memcpy) is endian-independent
 * and produces the same total. */
int bitcount_u128(unsigned __int128 n) {
    uint64_t lo = (uint64_t)n;
    uint64_t hi = (uint64_t)(n >> 64);
    return __builtin_popcountll(lo) + __builtin_popcountll(hi);
}
If using an x86 processor with the popcnt instruction (Which is most made in the last decade), compile with -mpopcnt or the appropriate -march= setting to use the hardware instruction.
Alternatively, if you're okay with limiting support to just x86 processors with popcnt, the _mm_popcnt_u64() intrinsic from <nmmintrin.h> can be used instead of __builtin_popcountll().
The following max function is supposed to return 5 but it returns 4294967294 instead. I suspect the weird behavior arise from casting variables but couldn't figure it out. Can someone detect the fault?
System: Windows 7 (64 bits), mingw64
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <stdarg.h>
#include <inttypes.h>
/* Return the largest of the n int64_t values that follow n in the
 * argument list (INT64_MIN if n == 0).  Callers must pass every
 * variadic argument as int64_t: default argument promotions do not
 * widen int to int64_t, so bare int literals invoke undefined
 * behavior when read back with va_arg. */
int64_t max(int64_t n, ...) {
    va_list args;
    int64_t best = INT64_MIN;

    va_start(args, n);
    while (n-- > 0) {
        int64_t cur = va_arg(args, int64_t);
        if (cur > best)
            best = cur;
    }
    va_end(args);
    return best;
}
int main(int argc, char **argv) {
/* BUG (the subject of the question): 1, 5 and -2 are passed as int,
 * but max() reads them back with va_arg(v, int64_t) — undefined
 * behavior; cast each argument to int64_t at the call site to fix.
 * %I64d is an MSVC/MinGW-specific format; PRId64 is the portable form. */
printf("max(3, 1, 5, -2) : %3I64d\n", max(3, 1, 5, -2));
return (0);
}
The compiler doesn't know that 1,5 and -2 are supposed to be type int64_t. So it will treat them as normal ints and will only use that much space on the stack for them.
You then read them as int64_t which is certainly larger than int and so your input and your var_args are out of alignment.
One way to fix, cast to int64_t at the call site.
printf("max(3, 1, 5, -2) : %"PRId64"\n", max(3, (int64_t)1, (int64_t)5, (int64_t)-2));
You could also obviously explicitly pass int64_t typed variables.
So I'm working on creating a precise Decimal structure that stores its characteristic and mantissa within a long and an unsigned long respectively. Because I'm doing this I've had to come up with my own subtraction and addition functions.
While testing my functions I ran across the troublesome problem of "negative zero". Essentially, I cannot represent -0.1 through -0.9 because there is no way for me to put a negative sign on my zero without just using a flag value of some kind. This is background information and I'll post the code so you can see how I'm doing the arithmetic. The STRANGE behavior though is that I'm getting a number above ULONG_MAX. Specifically this is the output of my log:
diff->right: 18446744073699551616
b->right10000000
MANTISSA_LIMIT: 100000000
ULONG_MAX: 18446744073709551615
Subtracting 10.10000000 from 10.00000000
Test: tests/bin/decimal.out(subtractDecimalsWithCarry+0x79) [0x40109f] Decimal: 0.10000000
And the code:
helpers/decimal.h:
#ifndef __DECIMAL_H__
#include <limits.h>
#define MANTISSA_LIMIT 100000000
#define __DECIMAL_H__
typedef struct{ /* Calling them more convenient terms: */
long left; /* characteristic */
unsigned long right; /* mantissa */
}Decimal;
void createDecimal(long left, unsigned long right, Decimal * dec);
/* Perform arithmetic operations on Decimal structures */
void add_decimals(Decimal* a, Decimal* b, Decimal* sum);
void subtract_decimals(Decimal* a, Decimal* b, Decimal* diff);
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
void createDecimalFromString(Decimal * dec, const char * str);
#endif
And then decimal.c's relavant code:
/* Subtract two decimals, a - b */
/* The two fields are subtracted independently; when the mantissa
 * subtraction borrows, the unsigned result wraps modulo 2^64 and is
 * repaired below. */
void subtract_decimals(Decimal* a, Decimal* b, Decimal* diff){
diff->left = a->left - b->left;
diff->right = a->right - b->right;
fprintf(stderr, "diff->right: %lu\n", diff->right);
fprintf(stderr, "b->right%lu\n", b->right);
fprintf(stderr, "MANTISSA_LIMIT: %d\n", MANTISSA_LIMIT);
fprintf(stderr, "ULONG_MAX: %lu\n", ULONG_MAX);
/* a wrapped (borrowed) mantissa is always > MANTISSA_LIMIT */
if(diff->right > MANTISSA_LIMIT) {
/* 18446744073699551616 == 2^64 - 10^7, i.e. the wrapped value of
 * 0 - 10000000 — the exact "10.0 - 10.1" case being tested.
 * NOTE(review): hard-coding one wrap value only handles that single
 * borrow amount; other fractional differences take the first branch. */
if(diff->right != 18446744073699551616UL)
diff->left -= 1;
else
diff->left *= -1; /* This is where I might put a flag for -0*/
diff->right = ULONG_MAX - diff->right + (18446744073699551616UL == diff->right ? 1 : 0); /* +1 because of the wrap around, we have to 'count' 0. */
}
}
/* Parse a decimal string such as "10.1" into dec: the characteristic
 * goes in dec->left, the fraction — scaled to 8 decimal places — in
 * dec->right.  A NULL str leaves dec untouched. */
void createDecimalFromString(Decimal * dec, const char * str){
long left;
unsigned long right;
char * dotLocation;
char rawLeft[9]; /* up to 8 characters + NUL for each half */
char rawRight[9];
int i;
int dotPos;
long leadingZeros;
int numDetected; /* becomes 1 once a non-zero fraction digit is seen */
if(str == NULL)
return;
bzero(rawLeft,9);
bzero(rawRight,9);
dotLocation = strstr(str, ".");
leadingZeros = numDetected = 0;
if(dotLocation == NULL){
/* no dot: the whole string is the characteristic */
left = atol(str);
right = 0;
}else{
/* ghetto strncpy */
for(i=0; i != 9 && str[i] != *dotLocation; ++i)
rawLeft[i] = str[i];
rawLeft[i] = '\0';
dotPos = i+1;
left = atol(rawLeft);
/* copy fraction digits, counting leading zeros so "0.01" and "0.1"
 * are scaled differently below */
for(i=0; i != 9 && str[dotPos] != '\0'; ++i,++dotPos){
if(str[dotPos] == '0' && numDetected == 0)
leadingZeros++;
else
numDetected = 1;
rawRight[i] = str[dotPos];
}
rawRight[i] = '\0';
right = strtoul(rawRight,NULL,10);
/* scale the fraction so it always occupies 8 decimal places.
 * NOTE(review): powlu is not a standard C function — presumably a
 * project helper for integer powers of ten; verify its definition. */
if(leadingZeros > 0)
/* subtract the leading zeros, then also the powers of ten taken by the number itself*/
right = (right*(powlu(10,7-leadingZeros-(i-2))));
else
right = right*(powlu(10,(i > 1 ? 8-(i-1) : 7 )));
}
dec->left = left;
dec->right = right;
}
And finally the calling code:
#include <stdio.h>
#include <stdlib.h>
#include <execinfo.h>
#include <unistd.h>
#include "helpers/decimal.h"
/* Print the caller's backtrace symbol plus the Decimal value.
 * Fixed to guard against backtrace_symbols() returning NULL (it
 * allocates, so it can fail) and against a backtrace too shallow for
 * strings[1] to exist — the original dereferenced both unchecked. */
void traceAndPrintDecimal(Decimal testDec){
    int nptrs;
    void *buffer[100];
    char **strings;

    nptrs = backtrace(buffer, 100);
    strings = backtrace_symbols(buffer, nptrs);
    /* strings[1] names the caller of this function, when present */
    if (strings != NULL && nptrs > 1)
        printf("Test: %s Decimal: %ld.%08lu\n", strings[1], testDec.left, testDec.right);
    else
        printf("Test: (no backtrace) Decimal: %ld.%08lu\n", testDec.left, testDec.right);
    free(strings); /* free(NULL) is a no-op, so no guard needed */
}
/* Exercise subtract_decimals() with operands that force a borrow in
 * the mantissa (10.0 - 10.1), then print both operands and the
 * traced result. */
void subtractDecimalsWithCarry(){
    Decimal minuend;
    Decimal subtrahend;
    Decimal result;

    createDecimalFromString(&minuend, "10.0");
    createDecimalFromString(&subtrahend, "10.1");
    subtract_decimals(&minuend, &subtrahend, &result);
    printf("Subtracting %ld.%08lu from %ld.%08lu\n", subtrahend.left, subtrahend.right, minuend.left, minuend.right);
    traceAndPrintDecimal(result);
}
int main(){
/* single smoke test: a subtraction that requires a borrow */
subtractDecimalsWithCarry();
return 0;
}
And the piece of my makefile for compiling:
# Compile the Decimal helper with debug info and strict warnings (-Werror)
decimal.o: src/helpers/decimal.c
cc -I./headers -std=gnu99 -pedantic -Wall -Wextra -Werror -g -c src/helpers/decimal.c -o obj/decimal.o
# Link the test driver; -rdynamic exports symbols so backtrace_symbols() can
# resolve function names, -lm for the math library
test-decimal: tests/decimal-test.c decimal.o
cc -I./headers -std=gnu99 -pedantic -Wall -Wextra -Werror -g tests/decimal-test.c obj/decimal.o -o tests/bin/decimal.out -lm -rdynamic
It's strange that diff->right is larger than ULONG_MAX, does anyone know why this might be? If you need anymore information let me know and I'll do my best to update the question.
Mistaken "Number above ULONG_MAX".
At first glance diff->right with the value "18446744073699551616" appeared to be larger than ULONG_MAX ("18446744073709551615"), but it is actually 9999999 less. (@UncleO)
OP's asserts in comment "any idea why the ones place goes a little cockeyed? The number should only be off by 1000000 because of the way the mantissa works. But it's off by 10000001 instead". Suggest this is incorrect.
// from createDecimalFromString(&oper1, "10.0");
oper1.right = 0
// from createDecimalFromString(&oper2, "10.1");
oper1.right = 10000000
// from subtract_decimals(&oper1, &oper2, &result)
diff->right = oper1.right - oper2.right --> 18446744073699551616
unsigned subtraction is well defined in C. In this case the difference oper1.right - oper2.right will mathematically result in oper1.right - oper2.right + (ULONG_MAX + 1).
" ... a result that cannot be represented by the resulting unsigned integer type is reduced modulo the number that is one greater than the largest value that can be represented by the resulting type." C11 6.2.5 8
I have a count register, which is made up of two 32-bit unsigned integers, one for the higher 32 bits of the value (most significant word), and other for the lower 32 bits of the value (least significant word).
What is the best way in C to combine these two 32-bit unsigned integers and then display as a large number?
In specific:
leastSignificantWord = 4294967295; //2^32-1
printf("Counter: %u%u", mostSignificantWord,leastSignificantWord);
This would print fine.
When the number is incremented to 4294967296, I have it so the leastSignificantWord wipes to 0, and mostSignificantWord (0 initially) is now 1. The whole counter should now read 4294967296, but right now it just reads 10, because I'm just concatenating 1 from mostSignificantWord and 0 from leastSignificantWord.
How should I make it display 4294967296 instead of 10?
It might be advantageous to use unsigned integers with explicit sizes in this case:
#include <stdio.h>
#include <inttypes.h>
int main(void) {
    /* Widen the high word to 64 bits before shifting so no bits are
     * lost, then OR in the low word to form the full counter value. */
    uint32_t leastSignificantWord = 0;
    uint32_t mostSignificantWord = 1;
    uint64_t counter = ((uint64_t) mostSignificantWord << 32) | leastSignificantWord;

    printf("%" PRIu64 "\n", counter);
    return 0;
}
Output
4294967296
Break down of (uint64_t) mostSignificantWord << 32 | leastSignificantWord
(typename) does typecasting in C. It changes value data type to typename.
(uint64_t) 0x00000001 -> 0x0000000000000001
<< does left shift. In C left shift on unsigned integers performs logical shift.
0x0000000000000001 << 32 -> 0x0000000100000000
| does 'bitwise or' (logical OR on bits of the operands).
0b0101 | 0b1001 -> 0b1101
long long val = (long long) mostSignificantWord << 32 | leastSignificantWord;
printf( "%lli", val );
my take:
/* Combine two 32-bit halves into a 64-bit value with shift-and-OR
 * (fixed "CONSTRANT" typo; %llu matches unsigned long long — the
 * original's %lld mismatched the unsigned type). */
unsigned int low = <SOME-32-BIT-CONSTANT>
unsigned int high = <SOME-32-BIT-CONSTANT>
unsigned long long data64;

data64 = (unsigned long long) high << 32 | low;

printf ("%llx\n", data64); /* hexadecimal output */
printf ("%llu\n", data64); /* decimal output */
Another approach:
unsigned int low = <SOME-32-BIT-CONSTRANT>
unsigned int high = <SOME-32-BIT-CONSTANT>
unsigned long long data64;
unsigned char * ptr = (unsigned char *) &data;
memcpy (ptr+0, &low, 4);
memcpy (ptr+4, &high, 4);
printf ("%llx\n", data64); /* hexadecimal output */
printf ("%lld\n", data64); /* decimal output */
Both versions work, and they will have similar performance (the compiler will optimize the memcpy away).
The second version does not work with big-endian targets but otoh it takes the guess-work away if the constant 32 should be 32 or 32ull. Something I'm never sure when I see shifts with constants greater than 31.
There's another way using arrays and pointers:
#include <stdio.h>
#include <inttypes.h>
int main(void) {
// Two uint32_t to one uint64_t
uint32_t val1[2] = {1000, 90000};
/* NOTE(review): reading uint32_t storage through a uint64_t* is a
 * strict-aliasing violation — undefined behavior.  The intermediate
 * pointer only silences the warning; it does not make the access
 * legal.  The value shown also assumes a little-endian target.
 * memcpy or a union are the well-defined alternatives. */
uint64_t *val1_u64_ptr = (uint64_t*)val1; //intermediate pointer cast to avoid Wstrict-aliasing warnings
uint64_t val2 = *val1_u64_ptr;
printf("val2: %" PRIu64 "\n", val2);
// val2: 386547056641000
// back to uint32_t array from uint64_t
uint64_t val3 = 386547056641000ull;
/* Same caveat in the other direction: a uint32_t view of a uint64_t
 * object, with endian-dependent element order. */
uint32_t *val4 = (uint32_t*)&val3;
printf("val4: %" PRIu32 ", %" PRIu32 "\n", val4[0], val4[1]);
// val4: 1000, 90000
return 0;
}
This code for me is much easier to understand and read. You are just creating a contiguous space in memory with two 32-bit unsigned int and then this same memory space is read as a single 64-bit unsigned int value and vice-versa. There are no operations involved only memory being read as different types.
EDIT
Forgot to mention that this is great if you already have a 64-bit array read from somewhere then you could easily read everything as 32-bit array pairs:
#include <stdio.h>
#include <inttypes.h>
int main() {
uint64_t array64[] = {
386547056641000ull,
93929935171414ull,
186655006591110ull,
73141496240875ull,
161460097995400ull,
351282298325439ull,
97310615654411ull,
104561732955680ull,
383587691986172ull,
386547056641000ull
};
int n_items = sizeof(array64) / sizeof(array64[0]);
/* NOTE(review): viewing uint64_t storage through a uint32_t* violates
 * strict aliasing (undefined behavior), and the [low, high] pair order
 * in the output below assumes a little-endian target. */
uint32_t* array32 = (uint32_t*)&array64;
for (int ii = 0; ii < n_items * 2; ii += 2) {
printf("[%" PRIu32 ", %" PRIu32 "]\n", array32[ii], array32[ii + 1]);
}
return 0;
}
Output:
[1000, 90000]
[3295375190, 21869]
[22874246, 43459]
[2498157291, 17029]
[3687404168, 37592]
[1218152895, 81789]
[3836596235, 22656]
[754134560, 24345]
[4162780412, 89310]
[1000, 90000]
Using union struct
Still better and more readable would be to use a struct union as from https://stackoverflow.com/a/2810339/2548351:
#include <stdio.h>
#include <inttypes.h>
/* Union view of one 64-bit value as two 32-bit halves.  Members are
 * unsigned to match the PRIu32/PRIu64 formats below (the original
 * printed signed int32_t/int64_t with unsigned conversions).  The
 * x = low / y = high pairing holds on little-endian targets. */
typedef union {
    uint64_t big;        /* the combined 64-bit value */
    struct {
        uint32_t x;      /* low  32 bits (little-endian layout) */
        uint32_t y;      /* high 32 bits (little-endian layout) */
    };
} xy_t;

int main() {
    /* initialize from 64-bit */
    xy_t value = {386547056641000ull};
    printf("[%" PRIu32 ",%" PRIu32 "]\n", value.x, value.y);
    // [1000, 90000]

    /* initialize as two 32-bit halves */
    xy_t value2 = {.x = 1000, .y = 90000};
    /* fixed: the original printed value.big here, leaving value2 unused */
    printf("%" PRIu64, value2.big);
    // 386547056641000
    return 0;
}
Instead of attempting to print decimal, I often print in hex.
Thus ...
printf ("0x%x%08x\n", upper32, lower32);
Alternatively, depending upon the architecture, platform and compiler, sometimes you can get away with something like ...
printf ("%lld\n", lower32, upper32);
or
printf ("%lld\n", upper32, lower32);
However, this alternative method is very machine dependent (endian-ness, as well as 64 vs 32 bit, ...) and in general is not recommended.
Hope this helps.
This code works when both upper32 and lower32 is negative:
data64 = ((LONGLONG)upper32<< 32) | ((LONGLONG)lower32& 0xffffffff);
You could do it by writing the 32-bit values to the right locations in memory:
unsigned long int data64;
data64=lowerword
*(&((unsigned int)data64)+1)=upperword;
This is machine-dependent however, for example it won't work correctly on big-endian processors.
Late to the game, but I needed something similar: producing the base-10 numeric string of a 64-bit integer on a 32-bit embedded environment.
So, inspired by the linked answer, I wrote the code below, which does what the question asks without being limited to base 10: it can convert to any base from 2 to 10 and can easily be extended to base N.
/* Add the binary value 'add' to the numeral string buf, which holds
 * digits '0'..'9' in the given base, most significant first.  Three
 * phases: (1) add base-digits of 'add' into existing digits from the
 * right, (2) spill digits of 'add' that extend past buf into tmp,
 * (3) ripple any remaining carry leftwards; new high-order digits
 * collected in tmp are finally prepended.  buf must be large enough
 * to hold the grown result. */
void stringBaseAdd(char *buf, unsigned long add, int base){
char tmp[65], *p, *q; /* tmp gathers new high digits, built right-to-left via q */
int l=strlen(buf);
int da1, da2, dar; /* digit of add, digit of buf, their column sum */
int r; /* carry */
tmp[64]=0;
q=&tmp[64];
p=&buf[l-1];
r=0;
/* phase 1: column-wise addition while both sources have digits */
while(add && p>=buf){
da1=add%base;
add/=base;
da2=*p-'0';
dar=da1+da2+r;
r=(dar>=base)? dar/base: 0;
*p='0'+(dar%base);
--p;
}
/* phase 2: 'add' still has digits but buf is exhausted */
while(add){
da1=add%base;
add/=base;
dar=da1+r;
r=(dar>=base)? dar/base: 0;
--q;
*q='0'+(dar%base);
}
/* phase 3: propagate the remaining carry through the rest of buf */
while(p>=buf && r){
da2=*p-'0';
dar=da2+r;
r=(dar>=base)? 1: 0;
*p='0'+(dar%base);
--p;
}
if(r){
--q;
*q='0'+r;
}
/* prepend any overflow digits gathered in tmp */
l=strlen(q);
if(l){
memmove(&buf[l], buf, strlen(buf)+1);
memcpy(buf, q, l);
}
}
/* Double, in place, the numeral string in buf (digits '0'..'9', most
 * significant first) interpreted in the given base.  If the result
 * gains a digit, the string is shifted right and a leading '1' is
 * prepended, so buf must have room for one extra character.
 * Doubling a digit plus carry never exceeds 2*base-1, so the carry
 * out of each column is at most 1. */
void stringBaseDouble(char *buf, int base){
    int carry = 0;

    for (char *p = buf + strlen(buf) - 1; p >= buf; --p) {
        int doubled = ((*p - '0') << 1) + carry;
        carry = (doubled >= base) ? 1 : 0;
        *p = (char)('0' + doubled % base);
    }
    if (carry) {
        /* result grew by one digit: shift (incl. NUL) and prepend '1' */
        memmove(&buf[1], buf, strlen(buf) + 1);
        *buf = '1';
    }
}
/* Add one, in place, to the numeral string in buf (digits '0'..'9',
 * most significant first) in the given base, propagating the carry
 * from the least significant digit.  If every digit carries, the
 * string is shifted right and a leading '1' prepended, so buf must
 * have room for one extra character. */
void stringBaseInc(char *buf, int base){
    int carry = 1;

    for (char *p = buf + strlen(buf) - 1; p >= buf && carry; --p) {
        int d = (*p - '0') + carry;
        carry = (d >= base) ? 1 : 0;
        *p = (char)('0' + d % base);
    }
    if (carry) {
        /* carried out of the top digit: grow the string by one */
        memmove(&buf[1], buf, strlen(buf) + 1);
        *buf = '1';
    }
}
/* Build the base-N numeral string of a 64-bit value supplied as two
 * 32-bit halves (h = most significant, l = least significant).
 * Written for a 32-bit embedded target — assumes unsigned long is
 * 32 bits (NOTE(review): confirm on the target platform).  Strategy:
 * left-justify the top 32 bits of h:l into 'init', convert those with
 * stringBaseAdd, then feed in the remaining low bits one at a time
 * with double (shift left) + increment (set bit). */
void stringLongLongInt(char *buf, unsigned long h, unsigned long l, int base){
unsigned long init=l;
int s=0, comb=0; /* s = bits borrowed from l; comb = both words in use */
if(h){
comb=1;
init=h;
/* normalize: shift h left until its top bit is set, pulling the
 * top bits of l in behind it */
while(!(init&0x80000000L)){
init<<=1;
init|=(l&0x80000000L)? 1: 0;
l<<=1;
s++;
}
}
buf[0]='0';
buf[1]=0;
stringBaseAdd(buf, init, base);
if(comb){
l>>=s; /* restore l's unconsumed bits */
h=0x80000000L>>s; /* mask selecting the next bit to shift in */
s=sizeof(l)*8-s; /* bits still to process */
while(s--){
stringBaseDouble(buf, base); /* buf = buf * 2 */
if(l&h)
stringBaseInc(buf, base); /* buf = buf + next bit */
h>>=1;
}
}
}
If you ask for
char buff[20];
stringLongLongInt(buff, 1, 0, 10);
your buff will contain 4294967296