In C, why is "signed int" faster than "unsigned int"? - c

In C, why is signed int faster than unsigned int? True, I know that this has been asked and answered multiple times on this website (links below). However, most people said that there is no difference. I have written code and accidentally found a significant performance difference.
Why would the "unsigned" version of my code be slower than the "signed" version (even when testing the same number)? (I have a x86-64 Intel processor).
Similar links
Faster comparing signed than unsigned ints
performance of unsigned vs signed integers
Compile Command: gcc -Wall -Wextra -pedantic -O3 -Wl,-O3 -g0 -ggdb0 -s -fwhole-program -funroll-loops -pthread -pipe -ffunction-sections -fdata-sections -std=c11 -o ./test ./test.c && strip --strip-all --strip-unneeded --remove-section=.note --remove-section=.comment ./test
signed int version
NOTE: There is no difference if I explicitly declare signed int on all numbers.
int isprime(int num) {
// Test if a signed int is prime
int i;
if (num % 2 == 0 || num % 3 == 0)
return 0;
else if (num % 5 == 0 || num % 7 == 0)
return 0;
else {
for (i = 11; i < num; i += 2) {
if (num % i == 0) {
if (i != num)
return 0;
else
return 1;
}
}
}
return 1;
}
unsigned int version
int isunsignedprime(unsigned int num) {
// Test if an unsigned int is prime
unsigned int i;
if (num % (unsigned int)2 == (unsigned int)0 || num % (unsigned int)3 == (unsigned int)0)
return 0;
else if (num % (unsigned int)5 == (unsigned int)0 || num % (unsigned int)7 == (unsigned int)0)
return 0;
else {
for (i = (unsigned int)11; i < num; i += (unsigned int)2) {
if (num % i == (unsigned int)0) {
if (i != num)
return 0;
else
return 1;
}
}
}
return 1;
}
Test this in a file with the below code:
#include <stdio.h>
int main(void) {
printf("%d\n", isprime(294967291));
printf("%d\n", isprime(294367293));
printf("%d\n", isprime(294967293));
printf("%d\n", isprime(294967241)); // slow
printf("%d\n", isprime(294967251));
printf("%d\n", isprime(294965291));
printf("%d\n", isprime(294966291));
printf("%d\n", isprime(294963293));
printf("%d\n", isprime(294927293));
printf("%d\n", isprime(294961293));
printf("%d\n", isprime(294917293));
printf("%d\n", isprime(294167293));
printf("%d\n", isprime(294267293));
printf("%d\n", isprime(294367293)); // slow
printf("%d\n", isprime(294467293));
return 0;
}
Results (time ./test):
Signed - real 0m0.949s
Unsigned - real 0m1.174s

Your question is genuinely intriguing as the unsigned version consistently produces code that is 10 to 20% slower. Yet there are multiple problems in the code:
Both functions return 0 for 2, 3, 5 and 7, which is incorrect.
The test if (i != num) return 0; else return 1; is completely useless, as the loop body is only run for i < num. Such a test would be useful for the small prime tests, but special-casing them is not really worthwhile.
The casts in the unsigned version are redundant.
Benchmarking code that produces textual output to the terminal is unreliable; you should use the clock() function to time CPU-intensive functions without any intervening I/O.
The algorithm for prime testing is utterly inefficient, as the loop runs num / 2 times instead of sqrt(num) times.
Let's simplify the code and run some precise benchmarks:
#include <stdio.h>
#include <time.h>
int isprime_slow(int num) {
if (num % 2 == 0)
return num == 2;
for (int i = 3; i < num; i += 2) {
if (num % i == 0)
return 0;
}
return 1;
}
int unsigned_isprime_slow(unsigned int num) {
if (num % 2 == 0)
return num == 2;
for (unsigned int i = 3; i < num; i += 2) {
if (num % i == 0)
return 0;
}
return 1;
}
int isprime_fast(int num) {
if (num % 2 == 0)
return num == 2;
for (int i = 3; i * i <= num; i += 2) {
if (num % i == 0)
return 0;
}
return 1;
}
int unsigned_isprime_fast(unsigned int num) {
if (num % 2 == 0)
return num == 2;
for (unsigned int i = 3; i * i <= num; i += 2) {
if (num % i == 0)
return 0;
}
return 1;
}
int main(void) {
int a[] = {
294967291, 0, 294367293, 0, 294967293, 0, 294967241, 1, 294967251, 0,
294965291, 0, 294966291, 0, 294963293, 0, 294927293, 1, 294961293, 0,
294917293, 0, 294167293, 0, 294267293, 0, 294367293, 0, 294467293, 0,
};
struct testcase { int (*fun)(); const char *name; int t; } test[] = {
{ isprime_slow, "isprime_slow", 0 },
{ unsigned_isprime_slow, "unsigned_isprime_slow", 0 },
{ isprime_fast, "isprime_fast", 0 },
{ unsigned_isprime_fast, "unsigned_isprime_fast", 0 },
};
for (int n = 0; n < 4; n++) {
clock_t t = clock();
for (int i = 0; i < 30; i += 2) {
if (test[n].fun(a[i]) != a[i + 1]) {
printf("%s(%d) != %d\n", test[n].name, a[i], a[i + 1]);
}
}
test[n].t = clock() - t;
}
for (int n = 0; n < 4; n++) {
printf("%21s: %4d.%03dms\n", test[n].name, test[n].t / 1000), test[n].t % 1000);
}
return 0;
}
The code compiled with clang -O2 on OS/X produces this output:
isprime_slow: 788.004ms
unsigned_isprime_slow: 965.381ms
isprime_fast: 0.065ms
unsigned_isprime_fast: 0.089ms
These timings are consistent with the OP's observed behavior on a different system, but show the dramatic improvement caused by the more efficient iteration test: 10000 times faster!
Regarding the question Why is the function slower with unsigned?, let's look at the generated code (gcc 7.2 -O2):
isprime_slow(int):
...
.L5:
movl %edi, %eax
cltd
idivl %ecx
testl %edx, %edx
je .L1
.L4:
addl $2, %ecx
cmpl %esi, %ecx
jne .L5
.L6:
movl $1, %edx
.L1:
movl %edx, %eax
ret
unsigned_isprime_slow(unsigned int):
...
.L19:
xorl %edx, %edx
movl %edi, %eax
divl %ecx
testl %edx, %edx
je .L22
.L18:
addl $2, %ecx
cmpl %esi, %ecx
jne .L19
.L20:
movl $1, %eax
ret
...
.L22:
xorl %eax, %eax
ret
The inner loops are very similar, same number of instructions, similar instructions. Here are however some potential explanations:
cltd extends the sign of the eax register into the edx register, which may be causing an instruction delay because eax is modified by the immediately preceding instruction movl %edi, %eax. Yet this would make the signed version slower than the unsigned one, not faster.
the loops' initial instructions might be misaligned for the unsigned version, but it is unlikely as changing the order in the source code has no effect on the timings.
Although the register contents are identical for the signed and unsigned division opcodes, it is possible that the idivl instruction takes fewer cycles than the divl instruction. Indeed the signed division operates on one less bit of precision than the unsigned division, but the difference seems quite large for this small change.
I suspect more effort was put into the silicon implementation of idivl because signed divisions are more common than unsigned divisions (as measured by years of coding statistics at Intel).
as commented by rcgldr, looking at instruction tables for Intel processors, for Ivy Bridge, DIV 32 bit takes 10 micro ops, 19 to 27 cycles, IDIV 9 micro ops, 19 to 26 cycles. The benchmark times are consistent with these timings. The extra micro-op may be due to the longer operands in DIV (64/32 bits) as opposed to IDIV (63/31 bits).
This surprising result should teach us a few lessons:
optimizing is a difficult art, be humble and procrastinate.
correctness is often broken by optimizations.
choosing a better algorithm beats optimization by a long shot.
always benchmark code, do not trust your instincts.

Because signed integer overflow is undefined, the compiler can make a lot of assumptions and optimizations on code involving signed integers. Unsigned integer overflow is defined to wrap around, so the compiler won't be able to optimize as much. See also http://blog.llvm.org/2011/05/what-every-c-programmer-should-know.html#signed_overflow and http://www.airs.com/blog/archives/120.
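As a small illustration of the kind of assumption this enables (my own sketch, not from the linked posts; whether a given compiler actually performs the folding depends on the compiler and flags): because signed overflow is undefined, the compiler may treat a signed overflow check as always true, while the unsigned variant must honor wraparound.
/* Signed overflow is undefined behaviour, so the compiler may assume
   x + 1 > x always holds and reduce this function to "return 1;". */
int always_greater(int x) { return x + 1 > x; }
/* Unsigned arithmetic wraps, so this must return 0 when x == UINT_MAX
   and cannot be folded away. */
int wraps(unsigned int x) { return x + 1 > x; }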

From Instruction specification on AMD/Intel we have (for K7):
Instruction Ops Latency Throughput
DIV r32/m32 32 24 23
IDIV r32 81 41 41
IDIV m32 89 41 41
For i7, latency and throughput are the same for IDIVL and DIVL; a slight difference exists in the µops.
This may explain the difference as -O3 assembly codes only differ by signedness (DIVL vs IDIVL) on my machine.

Alternative wiki candidate test that may/may not show a significant time difference.
#include <stdio.h>
#include <time.h>
#define J 10
#define I 5
int main(void) {
clock_t c1,c2,c3;
for (int j=0; j<J; j++) {
c1 = clock();
for (int i=0; i<I; i++) {
isprime(294967241);
isprime(294367293);
}
c2 = clock();
for (int i=0; i<I; i++) {
isunsignedprime(294967241);
isunsignedprime(294367293);
}
c3 = clock();
printf("%d %d %d\n", (int)(c2-c1), (int)(c3-c2), (int)((c3-c2) - (c2-c1)));
fflush(stdout);
}
return 0;
}
Sample output
2761 2746 -15
2777 2777 0
2761 2745 -16
2793 2808 15
2792 2730 -62
2746 2730 -16
2746 2730 -16
2776 2793 17
2823 2808 -15
2793 2823 30

In fact, in many cases unsigned is faster than signed. For example:
In dividing by a power of 2
unsigned int x=37;
cout<<x/4;
In checking if a number is even
unsigned int x=37;
cout << ((x % 2 == 0) ? "even" : "odd");

Related

Modulo operator slower than manual implementation?

I have found that manually calculating the % operator on __int128 is significantly faster than the built-in compiler operator. I will show you how to calculate modulo 9, but the method can be used to calculate modulo any other number.
First, consider the built-in compiler operator:
uint64_t mod9_v1(unsigned __int128 n)
{
return n % 9;
}
Now consider my manual implementation:
uint64_t mod9_v2(unsigned __int128 n)
{
uint64_t r = 0;
r += (uint32_t)(n);
r += (uint32_t)(n >> 32) * (uint64_t)4;
r += (uint32_t)(n >> 64) * (uint64_t)7;
r += (uint32_t)(n >> 96);
return r % 9;
}
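A quick note on why the multipliers 4 and 7 are the right constants (my addition, easy to check): 2^6 = 64 ≡ 1 (mod 9), so 2^32 ≡ 2^2 = 4, 2^64 ≡ 2^4 = 7 and 2^96 ≡ 1 (mod 9). A small sanity check, assuming a compiler that supports unsigned __int128:
#include <stdio.h>
int main(void) {
    unsigned __int128 p32 = (unsigned __int128)1 << 32;
    unsigned __int128 p64 = (unsigned __int128)1 << 64;
    unsigned __int128 p96 = (unsigned __int128)1 << 96;
    /* prints "4 7 1", matching the constants used in mod9_v2 */
    printf("%u %u %u\n", (unsigned)(p32 % 9), (unsigned)(p64 % 9), (unsigned)(p96 % 9));
    return 0;
}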
Measuring over 100,000,000 random numbers gives the following results:
mod9_v1 | 3.986052 secs
mod9_v2 | 1.814339 secs
GCC 9.3.0 with -march=native -O3 was used on AMD Ryzen Threadripper 2990WX.
Here is a link to godbolt.
I would like to ask if it behaves the same way on your side?
(Before reporting a bug to GCC Bugzilla).
UPDATE:
On request, I supply a generated assembly:
mod9_v1:
sub rsp, 8
mov edx, 9
xor ecx, ecx
call __umodti3
add rsp, 8
ret
mod9_v2:
mov rax, rdi
shrd rax, rsi, 32
mov rdx, rsi
mov r8d, eax
shr rdx, 32
mov eax, edi
add rax, rdx
lea rax, [rax+r8*4]
mov esi, esi
lea rcx, [rax+rsi*8]
sub rcx, rsi
mov rax, rcx
movabs rdx, -2049638230412172401
mul rdx
mov rax, rdx
shr rax, 3
and rdx, -8
add rdx, rax
mov rax, rcx
sub rax, rdx
ret
The reason for this difference is clear from the assembly listings: the % operator applied to 128-bit integers is implemented via a library call to a generic function that cannot take advantage of compile time knowledge of the divisor value, which makes it possible to turn division and modulo operations into much faster multiplications.
The timing difference is even more significant on my old Macbook Pro using clang, where mod9_v2() is about 15 times faster than mod9_v1().
Note however these remarks:
you should measure the cpu time just after the end of the for loop, not after the first printf as currently coded.
rand_u128() only produces 124 bits assuming RAND_MAX is 0x7fffffff.
most of the time is spent computing the random numbers.
Using your slicing approach, I extended your code to reduce the number of steps using slices of 42, 42 and 44 bits, which further improves the timings (because 2^42 % 9 == 1):
#pragma GCC diagnostic ignored "-Wpedantic"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <time.h>
static uint64_t mod9_v1(unsigned __int128 n) {
return n % 9;
}
static uint64_t mod9_v2(unsigned __int128 n) {
uint64_t r = 0;
r += (uint32_t)(n);
r += (uint32_t)(n >> 32) * (uint64_t)(((uint64_t)1ULL << 32) % 9);
r += (uint32_t)(n >> 64) * (uint64_t)(((unsigned __int128)1 << 64) % 9);
r += (uint32_t)(n >> 96);
return r % 9;
}
static uint64_t mod9_v3(unsigned __int128 n) {
return (((uint64_t)(n >> 0) & 0x3ffffffffff) +
((uint64_t)(n >> 42) & 0x3ffffffffff) +
((uint64_t)(n >> 84))) % 9;
}
unsigned __int128 rand_u128() {
return ((unsigned __int128)rand() << 97 ^
(unsigned __int128)rand() << 66 ^
(unsigned __int128)rand() << 35 ^
(unsigned __int128)rand() << 4 ^
(unsigned __int128)rand());
}
#define N 100000000
int main() {
srand(42);
unsigned __int128 *arr = malloc(sizeof(unsigned __int128) * N);
if (arr == NULL) {
return 1;
}
for (size_t n = 0; n < N; ++n) {
arr[n] = rand_u128();
}
#if 1
/* check that modulo 9 is calculated correctly */
for (size_t n = 0; n < N; ++n) {
uint64_t m = mod9_v1(arr[n]);
assert(m == mod9_v2(arr[n]));
assert(m == mod9_v3(arr[n]));
}
#endif
clock_t clk1 = -clock();
uint64_t sum1 = 0;
for (size_t n = 0; n < N; ++n) {
sum1 += mod9_v1(arr[n]);
}
clk1 += clock();
clock_t clk2 = -clock();
uint64_t sum2 = 0;
for (size_t n = 0; n < N; ++n) {
sum2 += mod9_v2(arr[n]);
}
clk2 += clock();
clock_t clk3 = -clock();
uint64_t sum3 = 0;
for (size_t n = 0; n < N; ++n) {
sum3 += mod9_v3(arr[n]);
}
clk3 += clock();
printf("mod9_v1: sum=%"PRIu64", elapsed time: %.3f secs\n", sum1, clk1 / (double)CLOCKS_PER_SEC);
printf("mod9_v2: sum=%"PRIu64", elapsed time: %.3f secs\n", sum2, clk2 / (double)CLOCKS_PER_SEC);
printf("mod9_v3: sum=%"PRIu64", elapsed time: %.3f secs\n", sum3, clk3 / (double)CLOCKS_PER_SEC);
free(arr);
return 0;
}
Here are the timings on my linux server (gcc):
mod9_v1: sum=400041273, elapsed time: 7.992 secs
mod9_v2: sum=400041273, elapsed time: 1.295 secs
mod9_v3: sum=400041273, elapsed time: 1.131 secs
The same code on my Macbook (clang):
mod9_v1: sum=399978071, elapsed time: 32.900 secs
mod9_v2: sum=399978071, elapsed time: 0.204 secs
mod9_v3: sum=399978071, elapsed time: 0.185 secs
In the mean time (while waiting for Bugzilla), you could let the preprocessor do the optimization for you. E.g. define a macro called MOD_INT128(n,d) :
#define MODCALC0(n,d) ((65536*n)%d)
#define MODCALC1(n,d) MODCALC0(MODCALC0(n,d),d)
#define MODCALC2(n,d) MODCALC1(MODCALC1(n,d),d)
#define MODCALC3(n,d) MODCALC2(MODCALC1(n,d),d)
#define MODPARAM(n,d,a,b,c) \
((uint64_t)((uint32_t)(n) ) + \
(uint64_t)((uint32_t)(n >> 32) * (uint64_t)a) + \
(uint64_t)((uint32_t)(n >> 64) * (uint64_t)b) + \
(uint64_t)((uint32_t)(n >> 96) * (uint64_t)c) ) % d
#define MOD_INT128(n,d) MODPARAM(n,d,MODCALC1(1,d),MODCALC2(1,d),MODCALC3(1,d))
Now,
uint64_t mod9_v3(unsigned __int128 n)
{
return MOD_INT128( n, 9 );
}
will generate similar assembly language as the mod9_v2() function, and
uint64_t mod8_v3(unsigned __int128 n)
{
return MOD_INT128( n, 8 );
}
works fine with already existing optimization (GCC 10.2.0)

x86 assembly code confusion

We've just begun the topic on assembly and I've been stuck on this problem for the longest time. I have to convert assembly to C code given the following:
C Code:
int foo(int *a, int n, int val) {
int i;
for (i = _________; ____________________________ ; i =___________) {
;
}
return i;
}
Assembly:
// what I've gathered so far
foo()
:
foo:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%ecx // ecx: a
movl 16(%ebp),%edx // edx: val
movl 12(%ebp),%eax // eax: n
decl %eax // n = n--
js .L3 // if n < 0 goto done
.L7: // loop
cmpl %edx,(%ecx,%eax,4) // I don't understand how you would compute the
// address for (%ecx,%eax,4) I know it would be %ecx + %eax*4 = %ecx + eax << 2
jne .L3 // if (%ecx, %eax, 4) != val goto done (?)
decl %eax // n = n--
jns .L7 // if (n >= 0) jump to loop
.L3: // done
movl %ebp,%esp
popl %ebp
ret
I don't know how to figure out what i is being initialized to and what the body of the loop is. I'm assuming i = n since n serves as the update. It seems as if there are two conditions, one being n > 0 and the other being the cmpl line. Please correct me if my understanding of the code is incorrect; any clues to this problem are much appreciated.
I may have made some off-by-one errors, but basically it is this:
int foo(int *a, int n, int val) {
int i;
for (i = n - 1; i >= 0 && a[i] == val; i = i - 1) {
;
}
return i;
}
The i is the %eax register; it loops from n - 1 down to 0. The cmpl indexed access (%ecx,%eax,4) is addressed in bytes; this is equivalent to a[i], as the size of int on ia32 is 4 bytes. The 4 bytes addressed this way are compared against val.
The %eax is implicitly returned.
Notice also, that js means < 0, and jns >= 0.
Another way to write it:
i = n;
i --; // decl %eax
if (i < 0) {
goto L3; // js .L3
}
L7:
if (a[i] != val) // cmpl %edx,(%ecx,%eax,4)
goto L3; // jne .L3
i --; // decl %eax
if (i >= 0)
goto L7; // jns .L7
L3:
return i;
An alternative using the preprocessor:
#define _________ n - 1
#define ____________________________ i >= 0 && a[i] == val
#define ___________ i - 1
int foo(int *a, int n, int val) {
int i;
for (i = _________; ____________________________ ; i =___________) {
;
}
return i;
}
Of course you can only use this for fun or to tease new programmers ;-)

Is it better to avoid using the mod operator when possible?

I assume that calculating the modulus of a number is a somewhat expensive operation, at least compared to simple arithmetic tests (such as seeing if a number exceeds the length of an array). If this is indeed the case, is it more efficient to replace, for example, the following code:
res = array[(i + 1) % len];
with the following? :
res = array[(i + 1 == len) ? 0 : i + 1];
The first one is easier on the eyes, but I wonder if the second might be more efficient. If so, might I expect an optimizing compiler to replace the first snippet with the second when a compiled language is used?
Of course, this "optimization" (if it is indeed an optimization) doesn't work in all cases (in this case, it only works if i+1 is never more than len).
My general advice is as follows. Use whichever version you think is easier on the eye, and then profile your entire system. Only optimize those parts of the code that the profiler flags up as bottlenecks. I'll bet my bottom dollar that the modulo operator isn't going to be among them.
As far as the specific example goes, only benchmarking can tell which is faster on your specific architecture using your specific compiler. You are potentially replacing modulo with branching, and it's anything but obvious which would be faster.
Some simple measurement:
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
int test = atoi(argv[1]);
int divisor = atoi(argv[2]);
int iterations = atoi(argv[3]);
int a = 0;
if (test == 0) {
for (int i = 0; i < iterations; i++)
a = (a + 1) % divisor;
} else if (test == 1) {
for (int i = 0; i < iterations; i++)
a = a + 1 == divisor ? 0 : a + 1;
}
printf("%d\n", a);
}
Compiling with either gcc or clang with -O3, and running time ./a.out 0 42 1000000000 (modulo version) or time ./a.out 1 42 1000000000 (comparison version) results in
6.25 seconds user runtime for the modulo version,
1.03 seconds for the comparison version.
(using gcc 5.2.1 or clang 3.6.2; Intel Core i5-4690K # 3.50GHz; 64-bit Linux)
This means that it is probably a good idea to use the comparison version.
Well, have a look at 2 ways to get the next value of a "modulo 3" cyclic counter.
int next1(int n) {
return (n + 1) % 3;
}
int next2(int n) {
return n == 2 ? 0 : n + 1;
}
I've compiled it with the gcc -O3 option (for the common x64 architecture), and -S to get the assembly code.
The code for the first function does some unexplainable magic (*) to avoid a division, using a multiplication anyway:
addl $1, %edi
movl $1431655766, %edx
movl %edi, %eax
imull %edx
movl %edi, %eax
sarl $31, %eax
subl %eax, %edx
leal (%rdx,%rdx,2), %eax
subl %eax, %edi
movl %edi, %eax
ret
And is much longer (and I bet slower) than the second function:
leal 1(%rdi), %eax
cmpl $2, %edi
movl $0, %edx
cmove %edx, %eax
ret
So it is not always true that "the (modern) compiler does a better job than you anyway".
Interestingly, the same experiment with 4 instead of 3 leads to and-masking for the first function
addl $1, %edi
movl %edi, %edx
sarl $31, %edx
shrl $30, %edx
leal (%rdi,%rdx), %eax
andl $3, %eax
subl %edx, %eax
ret
but it is still, by and large, inferior to the second version.
Being more explicit about the proper way to do things
int next3(int n) {
return (n + 1) & 3;
}
yields much better results :
leal 1(%rdi), %eax
andl $3, %eax
ret
(*) Well, not that complicated. Multiplication by the reciprocal: compute the integer constant K = (2^N)/3, for some large enough value of N. Now, when you want the value of X/3, instead of dividing by 3, compute X*K and shift it N positions to the right.
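To make that concrete, here is a small sketch of mine of the same trick for unsigned 32-bit values: with N = 33 the constant is ceil(2^33 / 3) = 0xAAAAAAAB, and the multiply-and-shift reproduces x / 3 exactly for every 32-bit value (the generated code above additionally has to handle negative n, hence the sarl/subl fixups).
#include <assert.h>
#include <stdint.h>
/* Division by the constant 3 via multiplication by a precomputed reciprocal:
   K = ceil(2^33 / 3) = 0xAAAAAAAB, then shift the 64-bit product right by 33. */
static uint32_t div3(uint32_t x) {
    return (uint32_t)(((uint64_t)x * 0xAAAAAAABu) >> 33);
}
int main(void) {
    uint32_t samples[] = { 0, 1, 2, 3, 4, 5, 1000000, 0x7FFFFFFFu, 0xFFFFFFFEu, 0xFFFFFFFFu };
    for (unsigned i = 0; i < sizeof samples / sizeof samples[0]; i++)
        assert(div3(samples[i]) == samples[i] / 3);
    return 0;
}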
Here is some additional benchmark. Note that I also added a branchless version:
#include <iostream>
#include <array>
#include <algorithm>
#include <random>
#include <chrono>
using namespace std::chrono;
constexpr size_t iter = 1e8;
int main() {
std::minstd_rand rnd_engine{1234};
std::uniform_int_distribution<int> dist {-1000, 1000};
auto gen = [&]() { return dist(rnd_engine); };
std::array<int, 10> a;
std::generate( a.begin(), a.end(), gen);
for (size_t size = 2; size < 10; size++) {
std::cout << "Modulus size = " << size << '\n';
{
std::cout << "operator% ";
long sum = 0;
size_t x = 0;
auto start = high_resolution_clock::now();
for (size_t i = 0; i < iter; ++i) {
sum += a[x];
x = (x + 1) % size;
}
auto stop = high_resolution_clock::now();
std::cout << duration_cast<microseconds>(stop - start).count()*0.001
<< "ms\t(sum = " << sum << ")\n";
}
{
std::cout << "ternary ";
long sum = 0;
size_t x = 0;
auto start = high_resolution_clock::now();
for (size_t i = 0; i < iter; ++i) {
sum += a[x];
x = ((x + 1) == size) ? 0 : x + 1;
}
auto stop = high_resolution_clock::now();
std::cout << duration_cast<microseconds>(stop - start).count()*0.001
<< "ms\t(sum = " << sum << ")\n";
}
{
std::cout << "branchless ";
long sum = 0;
size_t x = 1;
auto start = high_resolution_clock::now();
for (size_t i = 0; i < iter; ++i) {
sum += a[x-1];
x = ( x != size ) * x + 1;
}
auto stop = high_resolution_clock::now();
std::cout << duration_cast<microseconds>(stop - start).count()*0.001
<< "ms\t(sum = " << sum << ")\n";
}
}
return 0;
}
And here is the output on my i7-4870HQ
$ g++ -Ofast test.cpp && ./a.out
Modulus size = 2
operator% 904.249ms (sum = -4200000000)
ternary 137.04ms (sum = -4200000000)
branchless 169.182ms (sum = -4200000000)
Modulus size = 3
operator% 914.911ms (sum = -31533333963)
ternary 113.384ms (sum = -31533333963)
branchless 167.614ms (sum = -31533333963)
Modulus size = 4
operator% 877.3ms (sum = -36250000000)
ternary 97.265ms (sum = -36250000000)
branchless 167.215ms (sum = -36250000000)
Modulus size = 5
operator% 891.295ms (sum = -30700000000)
ternary 88.562ms (sum = -30700000000)
branchless 167.087ms (sum = -30700000000)
Modulus size = 6
operator% 903.644ms (sum = -39683333196)
ternary 83.433ms (sum = -39683333196)
branchless 167.778ms (sum = -39683333196)
Modulus size = 7
operator% 908.096ms (sum = -34585713678)
ternary 79.703ms (sum = -34585713678)
branchless 166.849ms (sum = -34585713678)
Modulus size = 8
operator% 869ms (sum = -39212500000)
ternary 76.972ms (sum = -39212500000)
branchless 167.29ms (sum = -39212500000)
Modulus size = 9
operator% 875.003ms (sum = -36500000580)
ternary 75.011ms (sum = -36500000580)
branchless 172.356ms (sum = -36500000580)
In this particular case the ternary operator looks far superior, and it becomes even more so when the branch predictor ramps up. Note however that this is a very particular case: if we were incrementing the index by a non-constant value, using the more general operator% would be straightforward while the other two methods could become very intricate.
I would like to stress this very much underrated comment:
if len is a compile-time constant a recent GCC compiler (with -02) is
usually doing clever things, often avoiding the modulus machine
instruction of the target processor. – Basile Starynkevitch
For instance by removing the loop on the size variable and declaring it as const size_t size = 4; I get:
g++ -Ofast test.cpp && ./a.out
Modulus size = 4
operator% 62.103ms (sum = -36250000000)
ternary 93.674ms (sum = -36250000000)
branchless 166.774ms (sum = -36250000000)
Conclusions
The execution time of the branchless version is pretty stable across the various scenarios. The ternary is consistently better than the branchless over the considered cases, especially when the branch predictor kicks in. Finally, the operator%, while being more general and significantly slower, has chances to get optimized to become the fastest as in the case of particular const values of the right hand side.
Of course this is completely platform dependent, who knows how this will be on an Arduino :)
If 'len' in your code is big enough, then the conditional will be faster, as the branch predictors will nearly always guess correctly.
If not, then I believe this is closely connected to circular queues, where it is often the case that the length is a power of 2. This will enable the compiler to substitute modulo with a simple AND.
The code is the following:
#include <stdio.h>
#include <stdlib.h>
#define modulo
int main()
{
int iterations = 1000000000;
int size = 16;
int a[size];
unsigned long long res = 0;
int i, j;
for (i=0;i<size;i++)
a[i] = i;
for (i=0,j=0;i<iterations;i++)
{
j++;
#ifdef modulo
j %= size;
#else
if (j >= size)
j = 0;
#endif
res += a[j];
}
printf("%llu\n", res);
}
size=15:
modulo: 4,868s
cond: 1,291s
size=16:
modulo: 1,067s
cond: 1,599s
Compiled with gcc 7.3.0, with -O3 optimization.
The machine is an i7 920.
I read an article on making a fast hash map. A bottleneck can be the modulo operator used to find the hash bucket. They suggested making the number of buckets a power of 2. Apparently, doing modulo by a power of two is just like looking at the last n bits.
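A tiny sketch of that bucket-index trick (my example; only valid when the bucket count is a power of two):
/* For a power-of-two table size, hash % nbuckets == hash & (nbuckets - 1),
   which keeps just the low bits of the hash and avoids the divider entirely. */
static unsigned bucket_index(unsigned hash, unsigned nbuckets) {
    return hash & (nbuckets - 1); /* requires nbuckets to be a power of 2 */
}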
The modulo operator is expensive, but division is expensive too, so converting your code from the modulo operator to division is not going to make it faster.
(i + 1) % len
To optimize the above code
if ((i + 1) == len) {
    return 0;
} else {
    return i + 1;
}
Modulo can be done with a single processor instruction on most architectures (ex. DIV on x86). However it's likely a premature optimization for what you need.

Fastest de-interleave operation in C?

I have a pointer to an array of bytes mixed that contains the interleaved bytes of two distinct arrays array1 and array2. Say mixed looks something like this:
a1b2c3d4...
What I need to do is de-interleave the bytes so I get array1 = abcd... and array2 = 1234.... I know the length of mixed ahead of time, and the lengths of array1 and array2 are equivalent, both equal to mixedLength / 2.
Here is my current implementation (array1 and array2 are already allocated):
int i, j;
int mixedLength_2 = mixedLength / 2;
for (i = 0, j = 0; i < mixedLength_2; i++, j += 2)
{
array1[i] = mixed[j];
array2[i] = mixed[j+1];
}
This avoids any expensive multiplication or division operations, but still doesn't run fast enough. I'm hoping there is something like memcpy that takes an indexer that can use low-level block copy operations to speed up the process. Is there a faster implementation than what I currently have?
Edit
The target platform is Objective-C for iOS and Mac. A fast operation is more important for iOS devices, so a solution targeting iOS specifically would be better than nothing.
Update
Thanks everyone for the responses, especially Stephen Canon, Graham Lee, and Mecki. Here is my "master" function that uses Stephen's NEON intrinsics if available and otherwise Graham's union cursors with a reduced number of iterations as suggested by Mecki.
void interleave(const uint8_t *srcA, const uint8_t *srcB, uint8_t *dstAB, size_t dstABLength)
{
#if defined __ARM_NEON__
// attempt to use NEON intrinsics
// iterate 32-bytes at a time
div_t dstABLength_32 = div(dstABLength, 32);
if (dstABLength_32.rem == 0)
{
while (dstABLength_32.quot --> 0)
{
const uint8x16_t a = vld1q_u8(srcA);
const uint8x16_t b = vld1q_u8(srcB);
const uint8x16x2_t ab = { a, b };
vst2q_u8(dstAB, ab);
srcA += 16;
srcB += 16;
dstAB += 32;
}
return;
}
// iterate 16-bytes at a time
div_t dstABLength_16 = div(dstABLength, 16);
if (dstABLength_16.rem == 0)
{
while (dstABLength_16.quot --> 0)
{
const uint8x8_t a = vld1_u8(srcA);
const uint8x8_t b = vld1_u8(srcB);
const uint8x8x2_t ab = { a, b };
vst2_u8(dstAB, ab);
srcA += 8;
srcB += 8;
dstAB += 16;
}
return;
}
#endif
// if the bytes were not aligned properly
// or NEON is unavailable, fall back to
// an optimized iteration
// iterate 8-bytes at a time
div_t dstABLength_8 = div(dstABLength, 8);
if (dstABLength_8.rem == 0)
{
typedef union
{
uint64_t wide;
struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; uint8_t a3; uint8_t b3; uint8_t a4; uint8_t b4; } narrow;
} ab8x8_t;
uint64_t *dstAB64 = (uint64_t *)dstAB;
int j = 0;
for (int i = 0; i < dstABLength_8.quot; i++)
{
ab8x8_t cursor;
cursor.narrow.a1 = srcA[j ];
cursor.narrow.b1 = srcB[j++];
cursor.narrow.a2 = srcA[j ];
cursor.narrow.b2 = srcB[j++];
cursor.narrow.a3 = srcA[j ];
cursor.narrow.b3 = srcB[j++];
cursor.narrow.a4 = srcA[j ];
cursor.narrow.b4 = srcB[j++];
dstAB64[i] = cursor.wide;
}
return;
}
// iterate 4-bytes at a time
div_t dstABLength_4 = div(dstABLength, 4);
if (dstABLength_4.rem == 0)
{
typedef union
{
uint32_t wide;
struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; } narrow;
} ab8x4_t;
uint32_t *dstAB32 = (uint32_t *)dstAB;
int j = 0;
for (int i = 0; i < dstABLength_4.quot; i++)
{
ab8x4_t cursor;
cursor.narrow.a1 = srcA[j ];
cursor.narrow.b1 = srcB[j++];
cursor.narrow.a2 = srcA[j ];
cursor.narrow.b2 = srcB[j++];
dstAB32[i] = cursor.wide;
}
return;
}
// iterate 2-bytes at a time
div_t dstABLength_2 = div(dstABLength, 2);
typedef union
{
uint16_t wide;
struct { uint8_t a; uint8_t b; } narrow;
} ab8x2_t;
uint16_t *dstAB16 = (uint16_t *)dstAB;
for (int i = 0; i < dstABLength_2.quot; i++)
{
ab8x2_t cursor;
cursor.narrow.a = srcA[i];
cursor.narrow.b = srcB[i];
dstAB16[i] = cursor.wide;
}
}
void deinterleave(const uint8_t *srcAB, uint8_t *dstA, uint8_t *dstB, size_t srcABLength)
{
#if defined __ARM_NEON__
// attempt to use NEON intrinsics
// iterate 32-bytes at a time
div_t srcABLength_32 = div(srcABLength, 32);
if (srcABLength_32.rem == 0)
{
while (srcABLength_32.quot --> 0)
{
const uint8x16x2_t ab = vld2q_u8(srcAB);
vst1q_u8(dstA, ab.val[0]);
vst1q_u8(dstB, ab.val[1]);
srcAB += 32;
dstA += 16;
dstB += 16;
}
return;
}
// iterate 16-bytes at a time
div_t srcABLength_16 = div(srcABLength, 16);
if (srcABLength_16.rem == 0)
{
while (srcABLength_16.quot --> 0)
{
const uint8x8x2_t ab = vld2_u8(srcAB);
vst1_u8(dstA, ab.val[0]);
vst1_u8(dstB, ab.val[1]);
srcAB += 16;
dstA += 8;
dstB += 8;
}
return;
}
#endif
// if the bytes were not aligned properly
// or NEON is unavailable, fall back to
// an optimized iteration
// iterate 8-bytes at a time
div_t srcABLength_8 = div(srcABLength, 8);
if (srcABLength_8.rem == 0)
{
typedef union
{
uint64_t wide;
struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; uint8_t a3; uint8_t b3; uint8_t a4; uint8_t b4; } narrow;
} ab8x8_t;
uint64_t *srcAB64 = (uint64_t *)srcAB;
int j = 0;
for (int i = 0; i < srcABLength_8.quot; i++)
{
ab8x8_t cursor;
cursor.wide = srcAB64[i];
dstA[j ] = cursor.narrow.a1;
dstB[j++] = cursor.narrow.b1;
dstA[j ] = cursor.narrow.a2;
dstB[j++] = cursor.narrow.b2;
dstA[j ] = cursor.narrow.a3;
dstB[j++] = cursor.narrow.b3;
dstA[j ] = cursor.narrow.a4;
dstB[j++] = cursor.narrow.b4;
}
return;
}
// iterate 4-bytes at a time
div_t srcABLength_4 = div(srcABLength, 4);
if (srcABLength_4.rem == 0)
{
typedef union
{
uint32_t wide;
struct { uint8_t a1; uint8_t b1; uint8_t a2; uint8_t b2; } narrow;
} ab8x4_t;
uint32_t *srcAB32 = (uint32_t *)srcAB;
int j = 0;
for (int i = 0; i < srcABLength_4.quot; i++)
{
ab8x4_t cursor;
cursor.wide = srcAB32[i];
dstA[j ] = cursor.narrow.a1;
dstB[j++] = cursor.narrow.b1;
dstA[j ] = cursor.narrow.a2;
dstB[j++] = cursor.narrow.b2;
}
return;
}
// iterate 2-bytes at a time
div_t srcABLength_2 = div(srcABLength, 2);
typedef union
{
uint16_t wide;
struct { uint8_t a; uint8_t b; } narrow;
} ab8x2_t;
uint16_t *srcAB16 = (uint16_t *)srcAB;
for (int i = 0; i < srcABLength_2.quot; i++)
{
ab8x2_t cursor;
cursor.wide = srcAB16[i];
dstA[i] = cursor.narrow.a;
dstB[i] = cursor.narrow.b;
}
}
Off the top of my head, I don't know of a library function for de-interleaving 2 channel byte data. However it's worth filing a bug report with Apple to request such a function.
In the meantime, it's pretty easy to vectorize such a function using NEON or SSE intrinsics. Specifically, on ARM you will want to use vld1q_u8 to load a vector from each source array, vuzpq_u8 to de-interleave them, and vst1q_u8 to store the resulting vectors; here's a rough sketch that I haven't tested or even tried to build, but it should illustrate the general idea. More sophisticated implementations are definitely possible (in particular, NEON can load/store two 16B registers in a single instruction, which the compiler may not do with this, and some amount of pipelining and/or unrolling may be beneficial depending on how long your buffers are):
#if defined __ARM_NEON__
# include <arm_neon.h>
#endif
#include <stdint.h>
#include <stddef.h>
void deinterleave(uint8_t *mixed, uint8_t *array1, uint8_t *array2, size_t mixedLength) {
#if defined __ARM_NEON__
size_t vectors = mixedLength / 32;
mixedLength %= 32;
while (vectors --> 0) {
const uint8x16_t src0 = vld1q_u8(mixed);
const uint8x16_t src1 = vld1q_u8(mixed + 16);
const uint8x16x2_t dst = vuzpq_u8(src0, src1);
vst1q_u8(array1, dst.val[0]);
vst1q_u8(array2, dst.val[1]);
mixed += 32;
array1 += 16;
array2 += 16;
}
#endif
for (size_t i=0; i<mixedLength/2; ++i) {
array1[i] = mixed[2*i];
array2[i] = mixed[2*i + 1];
}
}
I've only tested this lightly but it seemed at least twice as fast as your version:
typedef union {
uint16_t wide;
struct { uint8_t top; uint8_t bottom; } narrow;
} my_union;
uint16_t *source = (uint16_t *)mixed;
for (int i = 0; i < mixedLength/2; i++)
{
my_union cursor;
cursor.wide = source[i];
array1[i] = cursor.narrow.top;
array2[i] = cursor.narrow.bottom;
}
Notice that I wasn't careful with structure packing, but that in this case on this architecture that isn't a problem. Notice also someone might complain at my choice of naming top and bottom; I assume you know which half of which integers you need.
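If the struct packing (or the naming) bothers you, the same idea can be written with shifts instead of a union; this is only a sketch, and it makes the little-endian assumption explicit (the first interleaved byte is the low byte of each 16-bit word), just like the union version:
#include <stdint.h>
static void deinterleave16(const uint8_t *mixed, uint8_t *array1, uint8_t *array2, int mixedLength)
{
    const uint16_t *source = (const uint16_t *)mixed;
    for (int i = 0; i < mixedLength / 2; i++) {
        uint16_t pair = source[i];
        array1[i] = (uint8_t)(pair & 0xFF); /* first byte of the pair (little endian) */
        array2[i] = (uint8_t)(pair >> 8);   /* second byte of the pair */
    }
}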
Okay, here is your original method:
static void simpleDeint (
uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
int i, j;
int mixedLength_2 = mixedLength / 2;
for (i = 0, j = 0; i < mixedLength_2; i++, j += 2)
{
array1[i] = mixed[j];
array2[i] = mixed[j+1];
}
}
With 10 million entries and -O3 (compiler shall optimize for maximum speed), I can run this 154 times per second on my Mac.
Here is my first suggestion:
static void structDeint (
uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
int i;
int len;
uint8_t * array1Ptr = (uint8_t *)array1;
uint8_t * array2Ptr = (uint8_t *)array2;
struct {
uint8_t byte1;
uint8_t byte2;
} * tb = (void *)mixed;
len = mixedLength / 2;
for (i = 0; i < len; i++) {
*(array1Ptr++) = tb->byte1;
*(array2Ptr++) = tb->byte2;
tb++;
}
}
Same count and optimization as before, I get 193 runs per second.
Now the suggestion from Graham Lee:
static void unionDeint (
uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
union my_union {
uint16_t wide;
struct { uint8_t top; uint8_t bottom; } narrow;
};
uint16_t * source = (uint16_t *)mixed;
for (int i = 0; i < mixedLength/2; i++) {
union my_union cursor;
cursor.wide = source[i];
array1[i] = cursor.narrow.top;
array2[i] = cursor.narrow.bottom;
}
}
Same setup as before, 198 runs per second (NOTE: This method is not endian safe, the result depends on CPU endianness. In your case array1 and array2 are probably swapped since ARM is little endian, so you would have to swap them in the code).
Here's my best one so far:
static void uint32Deint (
uint8_t * array1, uint8_t * array2, uint8_t * mixed, int mixedLength
) {
int i;
int count;
uint32_t * fourBytes = (void *)mixed;
uint8_t * array1Ptr = (uint8_t *)array1;
uint8_t * array2Ptr = (uint8_t *)array2;
count = mixedLength / 4;
for (i = 0; i < count; i++) {
uint32_t temp = *(fourBytes++);
#if __LITTLE_ENDIAN__
*(array1Ptr++) = (uint8_t)(temp & 0xFF);
temp >>= 8;
*(array2Ptr++) = (uint8_t)(temp & 0xFF);
temp >>= 8;
*(array1Ptr++) = (uint8_t)(temp & 0xFF);
temp >>= 8;
*(array2Ptr++) = (uint8_t)(temp & 0xFF);
#else
*(array1Ptr++) = (uint8_t)(temp >> 24);
*(array2Ptr++) = (uint8_t)((temp >> 16) & 0xFF);
*(array1Ptr++) = (uint8_t)((temp >> 8) & 0xFF);
*(array2Ptr++) = (uint8_t)(temp & 0xFF);
#endif
}
// Either it is a multiple of 4 or a multiple of 2.
// If it is a multiple of 2, 2 bytes are left over.
if (count * 4 != mixedLength) {
*(array1Ptr) = mixed[mixedLength - 2];
*(array2Ptr) = mixed[mixedLength - 1];
}
}
Same setup as above, 219 times a second and, unless I made a mistake, should work with either endianness.
I recommend Graham's solution, but if this is really speed critical and you are willing to go Assembler, you can get even faster.
The idea is this:
Read an entire 32bit integer from mixed. You'll get 'a1b2'.
Rotate the lower 16bit by 8 bits to get '1ab2' (we are using little endian, since this is the default on ARM and therefore Apple A#, so the first two bytes are the lower ones).
Rotate the entire 32bit register right(I think it's right...) by 8 bits to get '21ab'.
Rotate the lower 16bit by 8 bits to get '12ab'
Write the lower 8 bits to array2.
Rotate the entire 32bit register by 16bit.
Write the lower 8 bits to array1
Advance array1 by 16bit, array2 by 16bit, and mixed by 32bit.
Repeat.
We have traded 2 memory reads (assuming we use Graham's version or equivalent) and 4 memory writes for one memory read, two memory writes and 4 register operations. While the number of operations has gone up from 6 to 7, register operations are faster than memory operations, so it's more efficient that way. Also, since we read from mixed 32 bits at a time instead of 16, we cut iteration management by half.
PS: Theoretically this can also be done for 64bit architecture, but doing all those rotations for 'a1b2c3d4' will drive you to madness.
For x86 SSE, the pack and punpck instructions are what you need. Examples using AVX for the convenience of non-destructive 3-operand instructions. (Not using AVX2 256b-wide instructions, because the 256b pack/unpck instructions do two 128b unpacks in the low and high 128b lanes, so you'd need a shuffle to get things in the correct final order.)
An intrinsics version of the following would work the same. Asm instructions are shorter to type for just writing a quick answer.
Interleave: abcd and 1234 -> a1b2c3d4:
# loop body:
vmovdqu (%rax), %xmm0 # load the sources
vmovdqu (%rbx), %xmm1
vpunpcklbw %xmm0, %xmm1, %xmm2 # low halves -> 128b reg
vpunpckhbw %xmm0, %xmm1, %xmm2 # high halves -> 128b reg
vmovdqu %xmm2, (%rdi) # store the results
vmovdqu %xmm3, 16(%rdi)
# blah blah some loop structure.
`punpcklbw` interleaves the bytes in the low 64 of the two source `xmm` registers. There are `..wd` (word->dword), and dword->qword versions which would be useful for 16 or 32bit elements.
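For completeness, a rough intrinsics version of that interleave loop body (an untested sketch of mine using the plain SSE2 intrinsics _mm_unpacklo_epi8/_mm_unpackhi_epi8; alignment and tail handling are left out):
#include <emmintrin.h> /* SSE2 */
#include <stdint.h>
#include <stddef.h>
/* Interleave a[] and b[] into ab[]: "abcd..." + "1234..." -> "a1b2c3d4...".
   n is the number of bytes in each source and must be a multiple of 16 here. */
static void interleave_sse2(const uint8_t *a, const uint8_t *b, uint8_t *ab, size_t n)
{
    for (size_t i = 0; i < n; i += 16) {
        __m128i va = _mm_loadu_si128((const __m128i *)(a + i));
        __m128i vb = _mm_loadu_si128((const __m128i *)(b + i));
        _mm_storeu_si128((__m128i *)(ab + 2 * i),      _mm_unpacklo_epi8(va, vb));
        _mm_storeu_si128((__m128i *)(ab + 2 * i + 16), _mm_unpackhi_epi8(va, vb));
    }
}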
De-interleave: a1b2c3d4 -> abcd and 1234
#outside the loop
vpcmpeqb %xmm5, %xmm5, %xmm5 # set to all-1s
vpsrlw $8, %xmm5, %xmm5 # every 16b word has low 8b = 0xFF, high 8b = 0.
# loop body
vmovdqu (%rsi), %xmm2 # load two src chunks
vmovdqu 16(%rsi), %xmm3
vpand %xmm2, %xmm5, %xmm0 # mask to leave only the odd bytes
vpand %xmm3, %xmm5, %xmm1
vpackuswb %xmm0, %xmm1, %xmm4
vmovdqu %xmm4, (%rax) # store 16B of a[]
vpsrlw $8, %xmm2, %xmm6 # even bytes -> odd bytes
vpsrlw $8, %xmm3, %xmm7
vpackuswb %xmm6, %xmm7, %xmm4
vmovdqu %xmm4, (%rbx)
This can of course use a lot fewer registers. I avoided reusing registers for readability, not performance. Hardware register renaming makes reuse a non-issue, as long as you start with something that doesn't depend on the previous value. (e.g. movd, not movss or pinsrd.)
Deinterleave is so much more work because the pack instructions do signed or unsigned saturation, so the upper 8b of each 16b element has to be zeroed first.
An alternative would be to use pshufb to pack the odd or even words of a single source reg into the low 64 of a register. However, outside of the AMD XOP instruction set's VPPERM, there isn't a shuffle that can select bytes from 2 registers at once (like Altivec's much-loved vperm). So with just SSE/AVX, you'd need 2 shuffles for every 128b of interleaved data. And since store-port usage could be the bottleneck, a punpck to combine two 64bit chunks of a into a single register to set up a 128b store.
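A hedged sketch of that pshufb variant (SSSE3, two shuffles plus two 64-bit unpacks per 32 interleaved bytes; untested, tail handling omitted):
#include <tmmintrin.h> /* SSSE3 */
#include <stdint.h>
#include <stddef.h>
/* De-interleave ab[] ("a1b2c3d4...") into a[] and b[]. n is the number of
   interleaved bytes and must be a multiple of 32 in this sketch. */
static void deinterleave_ssse3(const uint8_t *ab, uint8_t *a, uint8_t *b, size_t n)
{
    /* Gather even bytes into the low 8 lanes, odd bytes into the high 8 lanes. */
    const __m128i sep = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14,
                                      1, 3, 5, 7, 9, 11, 13, 15);
    for (size_t i = 0; i < n; i += 32) {
        __m128i v0 = _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *)(ab + i)), sep);
        __m128i v1 = _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *)(ab + i + 16)), sep);
        _mm_storeu_si128((__m128i *)(a + i / 2), _mm_unpacklo_epi64(v0, v1));
        _mm_storeu_si128((__m128i *)(b + i / 2), _mm_unpackhi_epi64(v0, v1));
    }
}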
With AMD XOP, deinterleave would be 2x128b loads, 2 VPPERM, and 2x128b stores.
premature optimisation is bad
your compiler is probably better at optimising than you are.
That said, there are things you can do to help out the compiler because you have semantic knowledge of your data that a compiler cannot have:
read and write as many bytes as you can, up to the native word size - memory operations are expensive, so do manipulations in registers where possible
unroll loops - look into "Duff's Device".
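For example, a plain manually unrolled variant of the copy loop from the question (my sketch, not Duff's device proper; 4 pairs per iteration with a scalar cleanup for the tail):
#include <stdint.h>
static void deinterleave_unrolled(const uint8_t *mixed, uint8_t *array1, uint8_t *array2, int mixedLength)
{
    int pairs = mixedLength / 2;
    int i = 0, j = 0;
    for (; i + 4 <= pairs; i += 4, j += 8) {
        array1[i]     = mixed[j];     array2[i]     = mixed[j + 1];
        array1[i + 1] = mixed[j + 2]; array2[i + 1] = mixed[j + 3];
        array1[i + 2] = mixed[j + 4]; array2[i + 2] = mixed[j + 5];
        array1[i + 3] = mixed[j + 6]; array2[i + 3] = mixed[j + 7];
    }
    for (; i < pairs; i++, j += 2) { /* leftover pairs */
        array1[i] = mixed[j];
        array2[i] = mixed[j + 1];
    }
}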
FWIW, I produced two versions of your copy loop, one much the same as yours, the second using what most would consider "optimal" (albeit still simple) C code:
void test1(byte *p, byte *p1, byte *p2, int n)
{
int i, j;
for (i = 0, j = 0; i < n / 2; i++, j += 2) {
p1[i] = p[j];
p2[i] = p[j + 1];
}
}
void test2(byte *p, byte *p1, byte *p2, int n)
{
while (n) {
*p1++ = *p++;
*p2++ = *p++;
n--; n--;
}
}
With gcc -O3 -S on Intel x86 they both produced almost identical assembly code. Here are the inner loops:
LBB1_2:
movb -1(%rdi), %al
movb %al, (%rsi)
movb (%rdi), %al
movb %al, (%rdx)
incq %rsi
addq $2, %rdi
incq %rdx
decq %rcx
jne LBB1_2
and
LBB2_2:
movb -1(%rdi), %al
movb %al, (%rsi)
movb (%rdi), %al
movb %al, (%rdx)
incq %rsi
addq $2, %rdi
incq %rdx
addl $-2, %ecx
jne LBB2_2
Both have the same number of instructions, the difference accounted for solely because the first version counts up to n / 2, and the second counts down to zero.
EDIT here's a better version:
/* non-portable - assumes little endian */
void test3(byte *p, byte *p1, byte *p2, int n)
{
ushort *ps = (ushort *)p;
n /= 2;
while (n--) {
ushort w = *ps++;
*p1++ = w;
*p2++ = w >> 8;
}
}
resulting in:
LBB3_2:
movzwl (%rdi), %ecx
movb %cl, (%rsi)
movb %ch, (%rdx) # NOREX
addq $2, %rdi
incq %rsi
incq %rdx
decq %rax
jne LBB3_2
which is one fewer instruction because it takes advantage of the immediate access to %cl and %ch.

optimized itoa function

I am thinking on how to implement the conversion of an integer (4byte, unsigned) to string with SSE instructions. The usual routine is to divide the number and store it in a local variable, then invert the string (the inversion routine is missing in this example):
char *convert(unsigned int num, int base) {
static char buff[33];
char *ptr;
ptr = &buff[sizeof(buff) - 1];
*ptr = '\0';
do {
*--ptr="0123456789abcdef"[num%base];
num /= base;
} while(num != 0);
return ptr;
}
But inversion will take extra time. Is there any other algorithm than can be used preferably with SSE instruction to parallelize the function?
Terje Mathisen invented a very fast itoa() that does not require lookup tables. If you're not interested in the explanation of how it works, skip down to Performance or Implementation.
More than 15 years ago Terje Mathisen came up with a parallelized itoa() for base 10. The idea is to take a 32-bit value and break it into two chunks of 5 digits. (A quick Google search for "Terje Mathisen itoa" gave this post: http://computer-programming-forum.com/46-asm/7aa4b50bce8dd985.htm)
We start like so:
void itoa(char *buf, uint32_t val)
{
lo = val % 100000;
hi = val / 100000;
itoa_half(&buf[0], hi);
itoa_half(&buf[5], lo);
}
Now we just need an algorithm that can convert any integer in the domain [0, 99999] to a string. A naive way to do that might be:
// 0 <= val <= 99999
void itoa_half(char *buf, uint32_t val)
{
// Move all but the first digit to the right of the decimal point.
float tmp = val / 10000.0;
for(size_t i = 0; i < 5; i++)
{
// Extract the next digit.
int digit = (int) tmp;
// Convert to a character.
buf[i] = '0' + (char) digit;
// Remove the lead digit and shift left 1 decimal place.
tmp = (tmp - digit) * 10.0;
}
}
Rather than use floating-point, we will use 4.28 fixed-point math because it is significantly faster in our case. That is, we fix the binary point at the 28th bit position such that 1.0 is represented as 2^28. To convert into fixed-point, we simply multiply by 2^28. We can easily round down to the nearest integer by masking with 0xf0000000, and we can extract the fractional portion by masking with 0x0fffffff.
(Note: Terje's algorithm differs slightly in the choice of fixed-point format.)
So now we have:
typedef uint32_t fix4_28;
// 0 <= val <= 99999
void itoa_half(char *buf, uint32_t val)
{
// Convert `val` to fixed-point and divide by 10000 in a single step.
// N.B. we would overflow a uint32_t if not for the parentheses.
fix4_28 tmp = val * ((1 << 28) / 10000);
for(size_t i = 0; i < 5; i++)
{
int digit = (int)(tmp >> 28);
buf[i] = '0' + (char) digit;
tmp = (tmp & 0x0fffffff) * 10;
}
}
The only problem with this code is that 2^28 / 10000 = 26843.5456, which is truncated to 26843. This causes inaccuracies for certain values. For example, itoa_half(buf, 83492) produces the string "83490". If we apply a small correction in our conversion to 4.28 fixed-point, then the algorithm works for all numbers in the domain [0, 99999]:
// 0 <= val <= 99999
void itoa_half(char *buf, uint32_t val)
{
fix4_28 const f1_10000 = (1 << 28) / 10000;
// 2^28 / 10000 is 26843.5456, but 26843.75 is sufficiently close.
fix4_28 tmp = val * (f1_10000 + 1) - (val / 4);
for(size_t i = 0; i < 5; i++)
{
int digit = (int)(tmp >> 28);
buf[i] = '0' + (char) digit;
tmp = (tmp & 0x0fffffff) * 10;
}
}
Terje interleaves the itoa_half part for the low & high halves:
void itoa(char *buf, uint32_t val)
{
fix4_28 const f1_10000 = (1 << 28) / 10000;
fix4_28 tmplo, tmphi;
lo = val % 100000;
hi = val / 100000;
tmplo = lo * (f1_10000 + 1) - (lo / 4);
tmphi = hi * (f1_10000 + 1) - (hi / 4);
for(size_t i = 0; i < 5; i++)
{
buf[i + 0] = '0' + (char)(tmphi >> 28);
buf[i + 5] = '0' + (char)(tmplo >> 28);
tmphi = (tmphi & 0x0fffffff) * 10;
tmplo = (tmplo & 0x0fffffff) * 10;
}
}
There is an additional trick that makes the code slightly faster if the loop is fully unrolled. The multiply by 10 is implemented as either a LEA+SHL or LEA+ADD sequence. We can save 1 instruction by multiplying instead by 5, which requires only a single LEA. This has the same effect as shifting tmphi and tmplo right by 1 position each pass through the loop, but we can compensate by adjusting our shift counts and masks like this:
uint32_t mask = 0x0fffffff;
uint32_t shift = 28;
for(size_t i = 0; i < 5; i++)
{
buf[i + 0] = '0' + (char)(tmphi >> shift);
buf[i + 5] = '0' + (char)(tmplo >> shift);
tmphi = (tmphi & mask) * 5;
tmplo = (tmplo & mask) * 5;
mask >>= 1;
shift--;
}
This only helps if the loop is fully-unrolled because you can precalculate the value of shift and mask for each iteration.
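For illustration (my addition, continuing the snippet above), here are the first two of the five unrolled steps with the shift and mask folded to constants:
buf[0] = '0' + (char)(tmphi >> 28);  buf[5] = '0' + (char)(tmplo >> 28);
tmphi  = (tmphi & 0x0fffffff) * 5;   tmplo  = (tmplo & 0x0fffffff) * 5;
buf[1] = '0' + (char)(tmphi >> 27);  buf[6] = '0' + (char)(tmplo >> 27);
tmphi  = (tmphi & 0x07ffffff) * 5;   tmplo  = (tmplo & 0x07ffffff) * 5;
/* ...and so on with shifts 26, 25, 24 and the mask halved at each step. */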
Finally, this routine produces zero-padded results. You can get rid of the padding by returning a pointer to the first character that is not 0 or the last character if val == 0:
char *itoa_unpadded(char *buf, uint32_t val)
{
char *p;
itoa(buf, val);
p = buf;
// Note: will break on GCC, but you can work around it by using memcpy() to dereference p.
if (*((uint64_t *) p) == 0x3030303030303030)
p += 8;
if (*((uint32_t *) p) == 0x30303030)
p += 4;
if (*((uint16_t *) p) == 0x3030)
p += 2;
if (*((uint8_t *) p) == 0x30)
p += 1;
return min(p, &buf[15]);
}
There is one additional trick applicable to 64-bit (i.e. AMD64) code. The extra, wider registers make it efficient to accumulate each 5-digit group in a register; after the last digit has been calculated, you can smash them together with SHRD, OR them with 0x3030303030303030, and store to memory. This improves performance for me by about 12.3%.
Vectorization
We could execute the above algorithm as-is on the SSE units, but there is almost no gain in performance. However, if we split the value into smaller chunks, we can take advantage of SSE4.1 32-bit multiply instructions. I tried three different splits:
2 groups of 5 digits
3 groups of 4 digits
4 groups of 3 digits
The fastest variant was 4 groups of 3 digits. See below for the results.
Performance
I tested many variants of Terje's algorithm in addition to the algorithms suggested by vitaut and Inge Henriksen. I verified through exhaustive testing of inputs that each algorithm's output matches itoa().
My numbers are taken from a Westmere E5640 running Windows 7 64-bit. I benchmark at real-time priority and locked to core 0. I execute each algorithm 4 times to force everything into the cache. I time 2^24 calls using RDTSCP to remove the effect of any dynamic clock speed changes.
I timed 5 different patterns of inputs:
itoa(0 .. 9) -- nearly best-case performance
itoa(1000 .. 1999) -- longer output, no branch mispredicts
itoa(100000000 .. 999999999) -- longest output, no branch mispredicts
itoa(256 random values) -- varying output length
itoa(65536 random values) -- varying output length and thrashes L1/L2 caches
The data:
ALG TINY MEDIUM LARGE RND256 RND64K NOTES
NULL 7 clk 7 clk 7 clk 7 clk 7 clk Benchmark overhead baseline
TERJE_C 63 clk 62 clk 63 clk 57 clk 56 clk Best C implementation of Terje's algorithm
TERJE_ASM 48 clk 48 clk 50 clk 45 clk 44 clk Naive, hand-written AMD64 version of Terje's algorithm
TERJE_SSE 41 clk 42 clk 41 clk 34 clk 35 clk SSE intrinsic version of Terje's algorithm with 1/3/3/3 digit grouping
INGE_0 12 clk 31 clk 71 clk 72 clk 72 clk Inge's first algorithm
INGE_1 20 clk 23 clk 45 clk 69 clk 96 clk Inge's second algorithm
INGE_2 18 clk 19 clk 32 clk 29 clk 36 clk Improved version of Inge's second algorithm
VITAUT_0 9 clk 16 clk 32 clk 35 clk 35 clk vitaut's algorithm
VITAUT_1 11 clk 15 clk 33 clk 31 clk 30 clk Improved version of vitaut's algorithm
LIBC 46 clk 128 clk 329 clk 339 clk 340 clk MSVCRT12 implementation
My compiler (VS 2013 Update 4) produced surprisingly bad code; the assembly version of Terje's algorithm is just a naive translation, and it's a full 21% faster. I was also surprised at the performance of the SSE implementation, which I expected to be slower. The big surprise was how fast INGE_2, VITAUT_0, and VITAUT_1 were. Bravo to vitaut for coming up with a portable solution that bests even my best effort at the assembly level.
Note: INGE_1 is a modified version of Inge Henriksen's second algorithm because the original has a bug.
INGE_2 is based on the second algorithm that Inge Henriksen gave. Rather than storing pointers to the precalculated strings in a char*[] array, it stores the strings themselves in a char[][5] array. The other big improvement is in how it stores characters in the output buffer. It stores more characters than necessary and uses pointer arithmetic to return a pointer to the first non-zero character. The result is substantially faster -- competitive even with the SSE-optimized version of Terje's algorithm. It should be noted that the microbenchmark favors this algorithm a bit because in real-world applications the 600K data set will constantly blow the caches.
VITAUT_1 is based on vitaut's algorithm with two small changes. The first change is that it copies character pairs in the main loop, reducing the number of store instructions. Similar to INGE_2, VITAUT_1 copies both final characters and uses pointer arithmetic to return a pointer to the string.
Implementation
Here I give code for the 3 most interesting algorithms.
TERJE_ASM:
; char *itoa_terje_asm(char *buf<rcx>, uint32_t val<edx>)
;
; *** NOTE ***
; buf *must* be 8-byte aligned or this code will break!
itoa_terje_asm:
MOV EAX, 0xA7C5AC47
ADD RDX, 1
IMUL RAX, RDX
SHR RAX, 48 ; EAX = val / 100000
IMUL R11D, EAX, 100000
ADD EAX, 1
SUB EDX, R11D ; EDX = (val % 100000) + 1
IMUL RAX, 214748 ; RAX = (val / 100000) * 2^31 / 10000
IMUL RDX, 214748 ; RDX = (val % 100000) * 2^31 / 10000
; Extract buf[0] & buf[5]
MOV R8, RAX
MOV R9, RDX
LEA EAX, [RAX+RAX] ; RAX = (RAX * 2) & 0xFFFFFFFF
LEA EDX, [RDX+RDX] ; RDX = (RDX * 2) & 0xFFFFFFFF
LEA RAX, [RAX+RAX*4] ; RAX *= 5
LEA RDX, [RDX+RDX*4] ; RDX *= 5
SHR R8, 31 ; R8 = buf[0]
SHR R9, 31 ; R9 = buf[5]
; Extract buf[1] & buf[6]
MOV R10, RAX
MOV R11, RDX
LEA EAX, [RAX+RAX] ; RAX = (RAX * 2) & 0xFFFFFFFF
LEA EDX, [RDX+RDX] ; RDX = (RDX * 2) & 0xFFFFFFFF
LEA RAX, [RAX+RAX*4] ; RAX *= 5
LEA RDX, [RDX+RDX*4] ; RDX *= 5
SHR R10, 31 - 8
SHR R11, 31 - 8
AND R10D, 0x0000FF00 ; R10 = buf[1] << 8
AND R11D, 0x0000FF00 ; R11 = buf[6] << 8
OR R10D, R8D ; R10 = buf[0] | (buf[1] << 8)
OR R11D, R9D ; R11 = buf[5] | (buf[6] << 8)
; Extract buf[2] & buf[7]
MOV R8, RAX
MOV R9, RDX
LEA EAX, [RAX+RAX] ; RAX = (RAX * 2) & 0xFFFFFFFF
LEA EDX, [RDX+RDX] ; RDX = (RDX * 2) & 0xFFFFFFFF
LEA RAX, [RAX+RAX*4] ; RAX *= 5
LEA RDX, [RDX+RDX*4] ; RDX *= 5
SHR R8, 31 - 16
SHR R9, 31 - 16
AND R8D, 0x00FF0000 ; R8 = buf[2] << 16
AND R9D, 0x00FF0000 ; R9 = buf[7] << 16
OR R8D, R10D ; R8 = buf[0] | (buf[1] << 8) | (buf[2] << 16)
OR R9D, R11D ; R9 = buf[5] | (buf[6] << 8) | (buf[7] << 16)
; Extract buf[3], buf[4], buf[8], & buf[9]
MOV R10, RAX
MOV R11, RDX
LEA EAX, [RAX+RAX] ; RAX = (RAX * 2) & 0xFFFFFFFF
LEA EDX, [RDX+RDX] ; RDX = (RDX * 2) & 0xFFFFFFFF
LEA RAX, [RAX+RAX*4] ; RAX *= 5
LEA RDX, [RDX+RDX*4] ; RDX *= 5
SHR R10, 31 - 24
SHR R11, 31 - 24
AND R10D, 0xFF000000 ; R10 = buf[3] << 24
AND R11D, 0xFF000000 ; R11 = buf[8] << 24
AND RAX, 0x80000000 ; RAX = buf[4] << 31
AND RDX, 0x80000000 ; RDX = buf[9] << 31
OR R10D, R8D ; R10 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24)
OR R11D, R9D ; R11 = buf[5] | (buf[6] << 8) | (buf[7] << 16) | (buf[8] << 24)
LEA RAX, [R10+RAX*2] ; RAX = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24) | (buf[4] << 32)
LEA RDX, [R11+RDX*2] ; RDX = buf[5] | (buf[6] << 8) | (buf[7] << 16) | (buf[8] << 24) | (buf[9] << 32)
; Compact the character strings
SHL RAX, 24 ; RAX = (buf[0] << 24) | (buf[1] << 32) | (buf[2] << 40) | (buf[3] << 48) | (buf[4] << 56)
MOV R8, 0x3030303030303030
SHRD RAX, RDX, 24 ; RAX = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24) | (buf[4] << 32) | (buf[5] << 40) | (buf[6] << 48) | (buf[7] << 56)
SHR RDX, 24 ; RDX = buf[8] | (buf[9] << 8)
; Store 12 characters. The last 2 will be null bytes.
OR R8, RAX
LEA R9, [RDX+0x3030]
MOV [RCX], R8
MOV [RCX+8], R9D
; Convert RCX into a bit pointer.
SHL RCX, 3
; Scan the first 8 bytes for a non-zero character.
OR EDX, 0x00000100
TEST RAX, RAX
LEA R10, [RCX+64]
CMOVZ RAX, RDX
CMOVZ RCX, R10
; Scan the next 4 bytes for a non-zero character.
TEST EAX, EAX
LEA R10, [RCX+32]
CMOVZ RCX, R10
SHR RAX, CL ; N.B. RAX >>= (RCX % 64); this works because buf is 8-byte aligned.
; Scan the next 2 bytes for a non-zero character.
TEST AX, AX
LEA R10, [RCX+16]
CMOVZ RCX, R10
SHR EAX, CL ; N.B. RAX >>= (RCX % 32)
; Convert back to byte pointer. N.B. this works because the AMD64 virtual address space is 48-bit.
SAR RCX, 3
; Scan the last byte for a non-zero character.
TEST AL, AL
MOV RAX, RCX
LEA R10, [RCX+1]
CMOVZ RAX, R10
RETN
INGE_2:
uint8_t len100K[100000];
char str100K[100000][5];
void itoa_inge_2_init()
{
memset(str100K, '0', sizeof(str100K));
for(uint32_t i = 0; i < 100000; i++)
{
char buf[6];
itoa(i, buf, 10);
len100K[i] = strlen(buf);
memcpy(&str100K[i][5 - len100K[i]], buf, len100K[i]);
}
}
char *itoa_inge_2(char *buf, uint32_t val)
{
char *p = &buf[10];
uint32_t prevlen;
*p = '\0';
do
{
uint32_t const old = val;
uint32_t mod;
val /= 100000;
mod = old - (val * 100000);
prevlen = len100K[mod];
p -= 5;
memcpy(p, str100K[mod], 5);
}
while(val != 0);
return &p[5 - prevlen];
}
VITAUT_1:
static uint16_t const str100p[100] = {
0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930,
0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931,
0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932,
0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933,
0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934,
0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936,
0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937,
0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938,
0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, };
char *itoa_vitaut_1(char *buf, uint32_t val)
{
char *p = &buf[10];
*p = '\0';
while(val >= 100)
{
uint32_t const old = val;
p -= 2;
val /= 100;
memcpy(p, &str100p[old - (val * 100)], sizeof(uint16_t));
}
p -= 2;
memcpy(p, &str100p[val], sizeof(uint16_t));
return &p[val < 10];
}
The first step to optimizing your code is getting rid of the arbitrary base support. This is because dividing by a constant is almost surely multiplication, but dividing by base is division, and because '0'+n is faster than "0123456789abcdef"[n] (no memory involved in the former).
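For instance, a minimal base-10-only rewrite of the question's convert() with just those two changes applied (still fills the buffer backwards; the name and signature are mine):
#include <stdint.h>
/* buf must have room for 11 characters (10 digits + NUL). Returns a pointer
   to the first digit, like the original convert(). */
static char *utoa10(char *buf, uint32_t num) {
    char *ptr = &buf[10];
    *ptr = '\0';
    do {
        *--ptr = (char)('0' + num % 10); /* '0' + n instead of the lookup string */
        num /= 10;                       /* constant divisor -> reciprocal multiply */
    } while (num != 0);
    return ptr;
}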
If you need to go beyond that, you could make lookup tables for each byte in the base you care about (e.g. 10), then vector-add the (e.g. decimal) results for each byte. As in:
00 02 00 80 (input)
0000000000 (place3[0x00])
+0000131072 (place2[0x02])
+0000000000 (place1[0x00])
+0000000128 (place0[0x80])
==========
0000131200 (result)
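A scalar C sketch of this idea (the table and function names are invented for illustration; a real implementation would vectorize the digit-wise addition, which is the point of the suggestion):
#include <stdint.h>

/* place_digits[k][b] holds the 10 decimal digits of b * 256^k, most
 * significant digit first.  Call init_places() once before converting. */
static uint8_t place_digits[4][256][10];

static void init_places(void)
{
    for (int place = 0; place < 4; place++)
        for (uint32_t b = 0; b < 256; b++) {
            uint64_t v = (uint64_t)b << (8 * place);
            for (int d = 9; d >= 0; d--) {   /* fill digits right to left */
                place_digits[place][b][d] = (uint8_t)(v % 10);
                v /= 10;
            }
        }
}

static void u32_to_decimal(uint32_t x, char out[11])
{
    uint8_t sum[10] = {0};
    int carry = 0;
    for (int d = 9; d >= 0; d--) {           /* add the four digit vectors */
        int s = carry;
        for (int place = 0; place < 4; place++)
            s += place_digits[place][(x >> (8 * place)) & 0xFF][d];
        sum[d] = (uint8_t)(s % 10);
        carry = s / 10;
    }
    for (int d = 0; d < 10; d++)             /* digits to ASCII */
        out[d] = (char)('0' + sum[d]);
    out[10] = '\0';                          /* fixed width, leading zeros kept */
}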
This post compares several methods of integer-to-string conversion, a.k.a. itoa. The fastest method reported there is fmt::format_int from the {fmt} library, which is 5-18 times faster than sprintf/std::stringstream and ~4 times faster than a naive ltoa/itoa implementation (the actual numbers may of course vary depending on platform).
Unlike most other methods, fmt::format_int makes a single pass over the digits. It also minimizes the number of integer divisions using the idea from Alexandrescu's talk Fastware. The implementation is available here.
This is, of course, only relevant if C++ is an option and you are not restricted to itoa's exact API.
Disclaimer: I'm the author of this method and the fmt library.
http://sourceforge.net/projects/itoa/
It uses a big static const array of all 4-digit integers and uses it for 32-bit or 64-bit conversion to string.
It is portable and does not need any specific instruction set.
The only faster version I could find was in assembly code and limited to 32 bits.
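A hedged sketch of the general idea behind such a 4-digit table (not the project's actual code; the names here are made up):
#include <stdint.h>
#include <string.h>

/* digits4[n] holds the 4 characters of n with leading zeros, "0000".."9999",
 * so one division by 10000 produces four output characters at a time. */
static char digits4[10000][4];

static void digits4_init(void)
{
    for (int i = 0; i < 10000; i++) {
        digits4[i][0] = (char)('0' + i / 1000);
        digits4[i][1] = (char)('0' + i / 100 % 10);
        digits4[i][2] = (char)('0' + i / 10 % 10);
        digits4[i][3] = (char)('0' + i % 10);
    }
}

static char *u32toa_4digit(char *buf /* >= 13 bytes */, uint32_t val)
{
    char *p = buf + 12;
    *p = '\0';
    do {
        uint32_t old = val;
        val /= 10000;
        p -= 4;
        memcpy(p, digits4[old - val * 10000], 4);
    } while (val);
    while (*p == '0' && p[1] != '\0')   /* drop leading zeros, keep a lone "0" */
        p++;
    return p;
}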
Interesting problem. If you're interested in a base-10-only itoa(), I have made two examples: one about 3 times and one about 10 times as fast as a typical itoa() implementation.
First example (3x performance)
The first, which is 3 times as fast as itoa(), uses a single-pass, non-reversing approach and is based on the open source itoa() implementation found in groff.
// itoaSpeedTest.cpp : Defines the entry point for the console application.
//
#pragma comment(lib, "Winmm.lib")
#include "stdafx.h"
#include "Windows.h"
#include <iostream>
#include <time.h>
using namespace std;
#ifdef _WIN32
/** a signed 32-bit integer value type */
#define _INT32 __int32
#else
/** a signed 32-bit integer value type */
#define _INT32 long int // Guess what a 32-bit integer is
#endif
/** minimum allowed value in a signed 32-bit integer value type */
#define _INT32_MIN -2147483647
/** maximum allowed value in a signed 32-bit integer value type */
#define _INT32_MAX 2147483647
/** maximum allowed number of characters in a signed 32-bit integer value type including a '-' */
#define _INT32_MAX_LENGTH 11
#ifdef _WIN32
/** Use to init the clock */
#define TIMER_INIT LARGE_INTEGER frequency;LARGE_INTEGER t1, t2;double elapsedTime;QueryPerformanceFrequency(&frequency);
/** Use to start the performance timer */
#define TIMER_START QueryPerformanceCounter(&t1);
/** Use to stop the performance timer and output the result to the standard stream */
#define TIMER_STOP QueryPerformanceCounter(&t2);elapsedTime=(t2.QuadPart-t1.QuadPart)*1000.0/frequency.QuadPart;wcout<<elapsedTime<<L" ms."<<endl;
#else
/** Use to init the clock */
#define TIMER_INIT
/** Use to start the performance timer */
#define TIMER_START clock_t start;double diff;start=clock();
/** Use to stop the performance timer and output the result to the standard stream */
#define TIMER_STOP diff=(clock()-start)/(double)CLOCKS_PER_SEC;wcout<<fixed<<diff<<endl;
#endif
/** Array used for fast number character lookup */
const char numbersIn10Radix[10] = {'0','1','2','3','4','5','6','7','8','9'};
/** Array used for fast reverse number character lookup */
const char reverseNumbersIn10Radix[10] = {'9','8','7','6','5','4','3','2','1','0'};
const char *reverseArrayEndPtr = &reverseNumbersIn10Radix[9];
/*!
\brief Converts a 32-bit signed integer to a string
\param i [in] Integer
\par Software design pattern
Uses a single pass non-reversing algorithm and is 3x as fast as \c itoa().
\returns Integer as a string
\copyright GNU General Public License
\copyright 1989-1992 Free Software Foundation, Inc.
\date 1989-1992, 2013
\author James Clark<jjc#jclark.com>, 1989-1992
\author Inge Eivind Henriksen<inge#meronymy.com>, 2013
\note Function was originally a part of \a groff, and was refactored & optimized in 2013.
\relates itoa()
*/
const char *Int32ToStr(_INT32 i)
{
// Make room for a 32-bit signed integer's digits and the '\0'
char buf[_INT32_MAX_LENGTH + 2];
char *p = buf + _INT32_MAX_LENGTH + 1;
*--p = '\0';
if (i >= 0)
{
do
{
*--p = numbersIn10Radix[i % 10];
i /= 10;
} while (i);
}
else
{
// Negative integer
do
{
*--p = reverseArrayEndPtr[i % 10];
i /= 10;
} while (i);
*--p = '-';
}
return p;
}
int _tmain(int argc, _TCHAR* argv[])
{
TIMER_INIT
// Make sure we are playing fair here
if (sizeof(int) != sizeof(_INT32))
{
cerr << "Error: integer size mismatch; test would be invalid." << endl;
return -1;
}
const int steps = 100;
{
char intBuffer[20];
cout << "itoa() took:" << endl;
TIMER_START;
for (int i = _INT32_MIN; i <= _INT32_MAX - steps; i += steps)
itoa(i, intBuffer, 10);
TIMER_STOP;
}
{
cout << "Int32ToStr() took:" << endl;
TIMER_START;
for (int i = _INT32_MIN; i <= _INT32_MAX - steps; i += steps)
Int32ToStr(i);
TIMER_STOP;
}
cout << "Done" << endl;
int wait;
cin >> wait;
return 0;
}
On 64-bit Windows the result from running this example is:
itoa() took:
2909.84 ms.
Int32ToStr() took:
991.726 ms.
Done
On 32-bit Windows the result from running this example is:
itoa() took:
3119.6 ms.
Int32ToStr() took:
1031.61 ms.
Done
Second example (10x performance)
If you don't mind spending some time initializing some buffers then it's possible to optimize the function above to be 10x faster than the typical itoa() implementation. What you need to do is to create string buffers rather than character buffers, like this:
// itoaSpeedTest.cpp : Defines the entry point for the console application.
//
#pragma comment(lib, "Winmm.lib")
#include "stdafx.h"
#include "Windows.h"
#include <iostream>
#include <time.h>
using namespace std;
#ifdef _WIN32
/** a signed 32-bit integer value type */
#define _INT32 __int32
/** a signed 8-bit integer value type */
#define _INT8 __int8
/** an unsigned 8-bit integer value type */
#define _UINT8 unsigned _INT8
#else
/** a signed 32-bit integer value type */
#define _INT32 long int // Guess what a 32-bit integer is
/** a signed 8-bit integer value type */
#define _INT8 char
/** an unsigned 8-bit integer value type */
#define _UINT8 unsigned _INT8
#endif
/** minimum allowed value in a signed 32-bit integer value type */
#define _INT32_MIN -2147483647
/** maximum allowed value in a signed 32-bit integer value type */
#define _INT32_MAX 2147483647
/** maximum allowed number of characters in a signed 32-bit integer value type including a '-' */
#define _INT32_MAX_LENGTH 11
#ifdef _WIN32
/** Use to init the clock */
#define TIMER_INIT LARGE_INTEGER frequency;LARGE_INTEGER t1, t2;double elapsedTime;QueryPerformanceFrequency(&frequency);
/** Use to start the performance timer */
#define TIMER_START QueryPerformanceCounter(&t1);
/** Use to stop the performance timer and output the result to the standard stream. Less verbose than \c TIMER_STOP_VERBOSE */
#define TIMER_STOP QueryPerformanceCounter(&t2);elapsedTime=(t2.QuadPart-t1.QuadPart)*1000.0/frequency.QuadPart;wcout<<elapsedTime<<L" ms."<<endl;
#else
/** Use to init the clock to get better precision than 15 ms on Windows */
#define TIMER_INIT timeBeginPeriod(10);
/** Use to start the performance timer */
#define TIMER_START clock_t start;double diff;start=clock();
/** Use to stop the performance timer and output the result to the standard stream. Less verbose than \c TIMER_STOP_VERBOSE */
#define TIMER_STOP diff=(clock()-start)/(double)CLOCKS_PER_SEC;wcout<<fixed<<diff<<endl;
#endif
/* Set this as large or small as you want, but has to be in the form 10^n where n >= 1, setting it smaller will
make the buffers smaller but the performance slower. If you want to set it larger than 100000 then you
must add some more cases to the switch blocks. Try to make it smaller to see the difference in
performance. It does however seem to become slower if larger than 100000 */
static const _INT32 numElem10Radix = 100000;
/** Array used for fast number string lookup */
const char *numbersIn10Radix[numElem10Radix] = {};
_UINT8 numbersIn10RadixLen[numElem10Radix] = {};
/** Array used for fast reverse number string lookup */
const char *reverseNumbersIn10Radix[numElem10Radix] = {};
_UINT8 reverseNumbersIn10RadixLen[numElem10Radix] = {};
void InitBuffers()
{
char intBuffer[20];
for (int i = 0; i < numElem10Radix; i++)
{
itoa(i, intBuffer, 10);
size_t numLen = strlen(intBuffer);
char *intStr = new char[numLen + 1];
strcpy(intStr, intBuffer);
numbersIn10Radix[i] = intStr;
numbersIn10RadixLen[i] = numLen;
reverseNumbersIn10Radix[numElem10Radix - 1 - i] = intStr;
reverseNumbersIn10RadixLen[numElem10Radix - 1 - i] = numLen;
}
}
/*!
\brief Converts a 32-bit signed integer to a string
\param i [in] Integer
\par Software design pattern
Uses a single pass non-reversing algorithm with string buffers and is 10x as fast as \c itoa().
\returns Integer as a string
\copyright GNU General Public License
\copyright 1989-1992 Free Software Foundation, Inc.
\date 1989-1992, 2013
\author James Clark<jjc#jclark.com>, 1989-1992
\author Inge Eivind Henriksen, 2013
\note This file was originally a part of \a groff, and was refactored & optimized in 2013.
\relates itoa()
*/
const char *Int32ToStr(_INT32 i)
{
/* Room for _INT32_MAX_LENGTH digits, '-' and '\0' */
char buf[_INT32_MAX_LENGTH + 2];
char *p = buf + _INT32_MAX_LENGTH + 1;
_INT32 modVal;
*--p = '\0';
if (i >= 0)
{
do
{
modVal = i % numElem10Radix;
switch(numbersIn10RadixLen[modVal])
{
case 5:
*--p = numbersIn10Radix[modVal][4];
case 4:
*--p = numbersIn10Radix[modVal][3];
case 3:
*--p = numbersIn10Radix[modVal][2];
case 2:
*--p = numbersIn10Radix[modVal][1];
default:
*--p = numbersIn10Radix[modVal][0];
}
i /= numElem10Radix;
} while (i);
}
else
{
// Negative integer
const char **reverseArray = &reverseNumbersIn10Radix[numElem10Radix - 1];
const _UINT8 *reverseArrayLen = &reverseNumbersIn10RadixLen[numElem10Radix - 1];
do
{
modVal = i % numElem10Radix;
switch(reverseArrayLen[modVal])
{
case 5:
*--p = reverseArray[modVal][4];
case 4:
*--p = reverseArray[modVal][3];
case 3:
*--p = reverseArray[modVal][2];
case 2:
*--p = reverseArray[modVal][1];
default:
*--p = reverseArray[modVal][0];
}
i /= numElem10Radix;
} while (i);
*--p = '-';
}
return p;
}
int _tmain(int argc, _TCHAR* argv[])
{
InitBuffers();
TIMER_INIT
// Make sure we are playing fair here
if (sizeof(int) != sizeof(_INT32))
{
cerr << "Error: integer size mismatch; test would be invalid." << endl;
return -1;
}
const int steps = 100;
{
char intBuffer[20];
cout << "itoa() took:" << endl;
TIMER_START;
for (int i = _INT32_MIN; i <= _INT32_MAX - steps; i += steps)
itoa(i, intBuffer, 10);
TIMER_STOP;
}
{
cout << "Int32ToStr() took:" << endl;
TIMER_START;
for (int i = _INT32_MIN; i <= _INT32_MAX - steps; i += steps)
Int32ToStr(i);
TIMER_STOP;
}
cout << "Done" << endl;
int wait;
cin >> wait;
return 0;
}
On 64-bit Windows the result from running this example is:
itoa() took:
2914.12 ms.
Int32ToStr() took:
306.637 ms.
Done
On 32-bit Windows the result from running this example is:
itoa() took:
3126.12 ms.
Int32ToStr() took:
299.387 ms.
Done
Why do you use reverse string lookup buffers?
It's possible to do this without the reverse string lookup buffers (thus saving half the internal memory), but it makes the conversion significantly slower (timed at about 850 ms on 64-bit and 380 ms on 32-bit systems). It's not clear to me exactly why it's so much slower, especially on 64-bit systems. To test this further yourself, simply change the following code:
#define _UINT32 unsigned _INT32
...
static const _UINT32 numElem10Radix = 100000;
...
void InitBuffers()
{
char intBuffer[20];
for (int i = 0; i < numElem10Radix; i++)
{
_itoa(i, intBuffer, 10);
size_t numLen = strlen(intBuffer);
char *intStr = new char[numLen + 1];
strcpy(intStr, intBuffer);
numbersIn10Radix[i] = intStr;
numbersIn10RadixLen[i] = numLen;
}
}
...
const char *Int32ToStr(_INT32 i)
{
char buf[_INT32_MAX_LENGTH + 2];
char *p = buf + _INT32_MAX_LENGTH + 1;
_UINT32 modVal;
*--p = '\0';
_UINT32 j = i;
do
{
modVal = j % numElem10Radix;
switch(numbersIn10RadixLen[modVal])
{
case 5:
*--p = numbersIn10Radix[modVal][4];
case 4:
*--p = numbersIn10Radix[modVal][3];
case 3:
*--p = numbersIn10Radix[modVal][2];
case 2:
*--p = numbersIn10Radix[modVal][1];
default:
*--p = numbersIn10Radix[modVal][0];
}
j /= numElem10Radix;
} while (j);
if (i < 0) *--p = '-';
return p;
}
This is part of my code in asm. It only works for the range 0-255, and it can be made faster, but it should give you the direction and the main idea:
4 imuls
1 memory read
1 memory write
You can try to eliminate two of the imuls and use leas with shifting instead. You won't find anything faster in C/C++/Python, though ;)
void itoa_asm(unsigned char inVal, char *str)
{
__asm
{
// eax=100's -> (some_integer/100) = (some_integer*41) >> 12
movzx esi,inVal
mov eax,esi
mov ecx,41
imul eax,ecx
shr eax,12
mov edx,eax
imul edx,100
mov edi,edx
// ebx=10's -> (some_integer/10) = (some_integer*205) >> 11
mov ebx,esi
sub ebx,edx
mov ecx,205
imul ebx,ecx
shr ebx,11
mov edx,ebx
imul edx,10
// ecx = 1's
mov ecx,esi
sub ecx,edx // -> sub 10's
sub ecx,edi // -> sub 100's
add al,'0'
add bl,'0'
add cl,'0'
//shl eax,
shl ebx,8
shl ecx,16
or eax,ebx
or eax,ecx
mov edi,str
mov [edi],eax
}
}
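For the curious, a small C check (not part of the answer above) confirming the two reciprocal constants that the asm relies on over the 0-255 input range:
#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (uint32_t x = 0; x <= 255; x++) {
        uint32_t hundreds = (x * 41) >> 12;    /* == x / 100 for 0..255 */
        uint32_t rem      = x - hundreds * 100;
        uint32_t tens     = (rem * 205) >> 11; /* == rem / 10 for 0..99 */
        uint32_t ones     = rem - tens * 10;
        assert(hundreds == x / 100);
        assert(tens == x / 10 % 10);
        assert(ones == x % 10);
    }
    return 0;
}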
#Inge Henriksen
I believe your code has a bug:
IntToStr(2701987) == "2701987" //Correct
IntToStr(27001987) == "2701987" //Incorrect
Here's why your code is wrong:
modVal = i % numElem10Radix;
switch (reverseArrayLen[modVal])
{
case 5:
*--p = reverseArray[modVal][4];
case 4:
*--p = reverseArray[modVal][3];
case 3:
*--p = reverseArray[modVal][2];
case 2:
*--p = reverseArray[modVal][1];
default:
*--p = reverseArray[modVal][0];
}
i /= numElem10Radix;
The less significant group must keep its leading zero: 27001987 splits into 270 and 01987, but after the first iteration the code emits only the 4 digits of "1987" instead of the 5 digits of "01987".
So,
IntToStr(27000000) = "2700" //Incorrect
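A possible fix, sketched here as a fresh C function rather than as a patch to the original switch (the name is hypothetical): every group except the most significant one has to be padded to the full 5 digits.
#include <stdint.h>

char *u32_to_str_fixed(char *buf /* >= 11 bytes */, uint32_t val)
{
    char *p = buf + 10;
    *p = '\0';
    for (;;) {
        uint32_t group = val % 100000;
        val /= 100000;
        if (val == 0) {                 /* most significant group: no padding */
            do {
                *--p = (char)('0' + group % 10);
                group /= 10;
            } while (group);
            return p;
        }
        for (int k = 0; k < 5; k++) {   /* inner group: always emit 5 digits */
            *--p = (char)('0' + group % 10);
            group /= 10;
        }
    }
}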
For unsigned values from 0 to 9,999,999 with a terminating null (up to 99,999,999 without one):
void itoa(uint64_t u, char *out) // up to 9,999,999 with terminating zero
{
    // N.B. this reads and writes 8 bytes at out, so the buffer must be at
    // least 8 bytes; the cast also ignores alignment and strict aliasing.
    *out = 0;
    do {
        uint64_t n0 = u;
        // Shift the characters emitted so far one byte up (little-endian)
        // and drop the new least significant digit into the first byte.
        *((uint64_t *)out) = (*((uint64_t *)out) << 8) | (n0 + '0' - (u /= 10) * 10);
    } while (u);
}
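If the type-punned 64-bit load/store is a concern, an equivalent sketch (name made up) can build the word in a local variable and memcpy it out; it is still little-endian only and still limited to 7 digits plus the terminating null in an 8-byte buffer:
#include <stdint.h>
#include <string.h>

void itoa7_memcpy(uint64_t u, char *out /* at least 8 bytes */)
{
    uint64_t w = 0;                      /* byte 0 starts out as the '\0' */
    do {
        uint64_t n0 = u;
        w = (w << 8) | (n0 + '0' - (u /= 10) * 10);
    } while (u);
    memcpy(out, &w, sizeof w);           /* avoids alignment/aliasing issues */
}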
Resources