CPU usage: C packed struct vs unsigned long long operations

I need to do some operations on 48-bit variables, so I had two options:
Create my own structure with 48-bit variables, or
Use unsigned long long (64 bits).
As the operations will not overflow 48 bits, I considered that using 64-bit variables was overkill, so I created a base structure:
#ifdef __GNUC__
#define PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
#endif
#ifdef _MSC_VER
#define PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
#endif
PACK(struct uint48 {
    unsigned long long v : 48;
});
and wrote some code to check the speed of the operations:
#include <stdio.h>
#include <time.h>

#ifdef __GNUC__
#define PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
#endif
#ifdef _MSC_VER
#define PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
#endif

PACK(struct uint48 {
    unsigned long long v : 48;
});

void TestProductLong();
void TestProductLong02();
void TestProductPackedStruct();
void TestProductPackedStruct02();

clock_t start, end;
double cpu_time_used;
int cycleNumber = 100000;

int main(void)
{
    TestProductLong();
    TestProductLong02();
    TestProductPackedStruct();
    TestProductPackedStruct02();
    return 0;
}

void TestProductLong() {
    start = clock();
    for (int i = 0; i < cycleNumber; i++) {
        unsigned long long varlong01 = 155782;
        unsigned long long varlong02 = 15519994;
        unsigned long long product01 = varlong01 * varlong02;
        unsigned long long varlong03 = 155782;
        unsigned long long varlong04 = 15519994;
        unsigned long long product02 = varlong03 * varlong04;
        unsigned long long addition = product01 + product02;
    }
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("TestProductLong() took %f seconds to execute \n", cpu_time_used);
}

void TestProductLong02() {
    start = clock();
    unsigned long long varlong01;
    unsigned long long varlong02;
    unsigned long long product01;
    unsigned long long varlong03;
    unsigned long long varlong04;
    unsigned long long product02;
    unsigned long long addition;
    for (int i = 0; i < cycleNumber; i++) {
        varlong01 = 155782;
        varlong02 = 15519994;
        product01 = varlong01 * varlong02;
        varlong03 = 155782;
        varlong04 = 15519994;
        product02 = varlong03 * varlong04;
        addition = product01 + product02;
    }
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("TestProductLong02() took %f seconds to execute \n", cpu_time_used);
}

void TestProductPackedStruct() {
    start = clock();
    for (int i = 0; i < cycleNumber; i++) {
        struct uint48 x01;
        struct uint48 x02;
        struct uint48 x03;
        x01.v = 155782;
        x02.v = 15519994;
        x03.v = x01.v * x02.v;
        struct uint48 x04;
        struct uint48 x05;
        struct uint48 x06;
        x04.v = 155782;
        x05.v = 15519994;
        x06.v = x04.v * x05.v;
        struct uint48 x07;
        x07.v = x03.v + x06.v;
    }
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("TestProductPackedStruct() took %f seconds to execute \n", cpu_time_used);
}

void TestProductPackedStruct02() {
    start = clock();
    struct uint48 x01;
    struct uint48 x02;
    struct uint48 x03;
    struct uint48 x04;
    struct uint48 x05;
    struct uint48 x06;
    struct uint48 x07;
    for (int i = 0; i < cycleNumber; i++) {
        x01.v = 155782;
        x02.v = 15519994;
        x03.v = x01.v * x02.v;
        x04.v = 155782;
        x05.v = 15519994;
        x06.v = x04.v * x05.v;
        x07.v = x03.v + x06.v;
    }
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("TestProductPackedStruct02() took %f seconds to execute \n", cpu_time_used);
}
But I got the following results
TestProductLong() took 0.000188 seconds to execute
TestProductLong02() took 0.000198 seconds to execute
TestProductPackedStruct() took 0.001231 seconds to execute
TestProductPackedStruct02() took 0.001231 seconds to execute
So the operations using unsigned long long took less time than the ones using the packed structure.
Why is that?
Would it be better, then, to use unsigned long long instead?
Is there a better way to pack structures?
As I'm currently unrolling loops, using the correct data structure could impact the performance of my application significantly.
Thank you.

Although you know that the operations on the 48-bit values will not overflow, a compiler cannot know this! Further, with the vast majority of compilers and platforms, your uint48 structure will actually be implemented as a 64-bit data type, of which only the low 48 bits will ever be used.
So, after any arithmetic (or other) operation on the .v data, the 'unused' 16 bits of the (internal) 64-bit representation will need to be cleared, to ensure that any future access to that data gives the true, 48-bit-only value.
Thus, using the clang-cl compiler in Visual Studio 2019, the following (rather trivial) function using the native uint64_t type:
extern uint64_t add64(uint64_t a, uint64_t b) {
    return a + b;
}
generates the expected, highly efficient assembly code:
lea rax, [rcx + rdx]
ret
However, using (an equivalent of) your 48-bit packed structure:
#pragma pack(push, 1)
typedef struct uint48 {
    unsigned long long v : 48;
} uint48_t;
#pragma pack(pop)

extern uint48_t add48(uint48_t a, uint48_t b) {
    uint48_t c;
    c.v = a.v + b.v;
    return c;
}
requires additional assembly code to ensure that any overflow into the 'unused' bits is discarded:
add rcx, rdx
movabs rax, 281474976710655 # This is 0x0000FFFFFFFFFFFF - clearing top 16 bits!
and rax, rcx
ret
Note that the MSVC compiler generates very similar code.
Thus, you should expect that using native uint64_t variables will generate more efficient code than your 'space-saving' structure.
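If the 48-bit type exists mainly to model values that are known to fit in 48 bits, a possible alternative (a minimal sketch under that assumption, not part of the answer above) is to keep the values in plain uint64_t and apply the 48-bit mask only where a genuinely truncated result is needed, instead of after every operation as the bit-field forces the compiler to do:

#include <stdint.h>

#define UINT48_MASK 0xFFFFFFFFFFFFull   /* low 48 bits */

/* Arithmetic stays in full 64-bit registers; truncation happens once,
 * at the point where a true 48-bit result is required. */
static inline uint64_t to_uint48(uint64_t x)
{
    return x & UINT48_MASK;
}

/* Example: the products and the sum are computed without any masking. */
static inline uint64_t mac48(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
    return to_uint48(a * b + c * d);
}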

Your test procedure is wrong. Why?
Packing a struct with a single bit-field member actually does nothing.
You compiled with -O0, and benchmarking execution speed with no optimizations does not make any sense. If you compile with optimizations, your code is wiped out entirely :) https://godbolt.org/z/9ibP_8
When you rearrange the code so it can be optimized but not removed (since you do not use the values, they have to be global or at least static, and a compiler memory barrier (clobber) has to be added): https://godbolt.org/z/BL9uJE
The difference comes from trimming the results to 48 bits.
If you pack the struct (which is not necessary here), you force the compiler to access the variables byte by byte, because only bytes are always aligned: https://godbolt.org/z/2iV7vq
You can also use a mixed approach - not portable, as it relies on endianness and the bit-field implementation: https://godbolt.org/z/J3-it_
so the code will compile to:
unsigned long long:
mov QWORD PTR varlong01[rip], 155782
mov QWORD PTR varlong02[rip], 15519994
mov QWORD PTR product01[rip], rdx
mov QWORD PTR varlong03[rip], 155782
mov QWORD PTR varlong04[rip], 15519994
mov QWORD PTR product02[rip], rdx
mov QWORD PTR addition[rip], rcx
not packed struct:
mov rdx, QWORD PTR x01[rip]
and rdx, rax
or rdx, 155782
mov QWORD PTR x01[rip], rdx
mov rdx, QWORD PTR x02[rip]
and rdx, rax
or rdx, 15519994
mov QWORD PTR x02[rip], rdx
mov rdx, QWORD PTR x03[rip]
and rdx, rax
or rdx, rsi
mov QWORD PTR x03[rip], rdx
mov rdx, QWORD PTR x04[rip]
and rdx, rax
or rdx, 155782
mov QWORD PTR x04[rip], rdx
mov rdx, QWORD PTR x05[rip]
and rdx, rax
or rdx, 15519994
mov QWORD PTR x05[rip], rdx
mov rdx, QWORD PTR x06[rip]
and rdx, rax
or rdx, rsi
mov QWORD PTR x06[rip], rdx
mov rdx, QWORD PTR x07[rip]
and rdx, rax
or rdx, rdi
mov QWORD PTR x07[rip], rdx
packed struct:
mov BYTE PTR x01[rip], -122
mov BYTE PTR x01[rip+1], 96
mov BYTE PTR x01[rip+2], 2
mov BYTE PTR x01[rip+3], 0
mov BYTE PTR x01[rip+4], 0
mov BYTE PTR x01[rip+5], 0
mov BYTE PTR x02[rip], -6
mov BYTE PTR x02[rip+1], -48
mov BYTE PTR x02[rip+2], -20
mov BYTE PTR x02[rip+3], 0
mov BYTE PTR x02[rip+4], 0
mov BYTE PTR x02[rip+5], 0
mov BYTE PTR x03[rip], -36
mov BYTE PTR x03[rip+1], 34
mov BYTE PTR x03[rip+2], 71
mov BYTE PTR x03[rip+3], -20
mov BYTE PTR x03[rip+4], 50
mov BYTE PTR x03[rip+5], 2
mov BYTE PTR x04[rip], -122
mov BYTE PTR x04[rip+1], 96
mov BYTE PTR x04[rip+2], 2
mov BYTE PTR x04[rip+3], 0
mov BYTE PTR x04[rip+4], 0
mov BYTE PTR x04[rip+5], 0
mov BYTE PTR x05[rip], -6
mov BYTE PTR x05[rip+1], -48
mov BYTE PTR x05[rip+2], -20
mov BYTE PTR x05[rip+3], 0
mov BYTE PTR x05[rip+4], 0
mov BYTE PTR x05[rip+5], 0
mov BYTE PTR x06[rip], -36
mov BYTE PTR x06[rip+1], 34
mov BYTE PTR x06[rip+2], 71
mov BYTE PTR x06[rip+3], -20
mov BYTE PTR x06[rip+4], 50
mov BYTE PTR x06[rip+5], 2
mov BYTE PTR x07[rip], -72
mov BYTE PTR x07[rip+1], 69
mov BYTE PTR x07[rip+2], -114
mov BYTE PTR x07[rip+3], -40
mov BYTE PTR x07[rip+4], 101
mov BYTE PTR x07[rip+5], 4
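If the motivation for the 48-bit struct is purely to save memory in large arrays, a portable alternative (my sketch, not part of the answer above) is to keep values in uint64_t for arithmetic and copy exactly 6 bytes in and out of storage, which avoids both the packing pragma and the bit-field:

#include <stdint.h>

/* 48-bit values stored as 6 bytes in little-endian order, independent of
 * the host's endianness and of any bit-field implementation details. */
static inline uint64_t load48(const unsigned char *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 6; i++)
        v |= (uint64_t)p[i] << (8 * i);
    return v;
}

static inline void store48(unsigned char *p, uint64_t v)
{
    for (int i = 0; i < 6; i++)
        p[i] = (unsigned char)(v >> (8 * i));
}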

Related

Pointer and array usage confusion

Here is a code excerpt from the official Quake 2 source code:
unsigned *buf;
dheader_t header;
...
header = *(dheader_t *)buf; // #1
for (i=0 ; i<sizeof(dheader_t)/4 ; i++)
    ((int *)&header)[i] = LittleLong ( ((int *)&header)[i]); // #2
Can someone please explain to me, in as much detail as possible, what lines #1 and #2 really do? I'm more than a little confused...
P.S.
Here are the rest of the definitions, if it helps:
int LittleLong (int l) {return _LittleLong(l);}
...
typedef struct
{
    int ident;
    int version;
    lump_t lumps[HEADER_LUMPS];
} dheader_t;
P.S. 2
I've linked the original full source file above, if needed.
This is some seriously brittle code and you shouldn't write code like this.
What it does is go through the struct int by int, then do something with each such int inside _LittleLong. Very likely this function performs a 32-bit conversion from a big-endian integer to a little-endian one, meaning that the source you are looking at is likely related to the reception of IP packets.
Looking at what the code does step by step:
for (i=0 ; i<sizeof(dheader_t)/4 ; i++) is a sloppier way of writing sizeof(dheader_t)/sizeof(int). That is: iterate through the struct int by int, chunks of 32 bits.
(int *)&header converts from a dheader_t* to a int*. This is actually well-defined by a special rule in C that allows us to convert from a pointer to a struct to a pointer to its first member or vice versa and the first member is int.
However, doing so is only well-defined for the first member. Instead they take the converted int* and apply array dereferencing on it: ((int *)&header)[i]. This is undefined behavior in C, a so-called strict aliasing violation, and could also cause alignment problems in some situations. Bad.
The int read from the struct through this dereferencing is then passed along to LittleLong which very likely does a big -> little endian conversion.
((int *)&header)[i] = and here it is written back to where it was grabbed from.
Better, safer, well-defined and possibly faster code could look like:
void endianify (dheader_t* header)
{
    _Static_assert(sizeof(dheader_t)%sizeof(uint32_t)==0,
                   "Broken struct: dheader_t");

    unsigned char* start = (unsigned char*)header;
    unsigned char* end = start + sizeof(dheader_t);

    for(unsigned char* i=start; i!=end; i+=sizeof(uint32_t))
    {
        uint32_t tmp;
        memcpy(&tmp,i,sizeof(uint32_t));
        i[0]= (tmp >> 24) & 0xFF;
        i[1]= (tmp >> 16) & 0xFF;
        i[2]= (tmp >> 8) & 0xFF;
        i[3]= (tmp >> 0) & 0xFF;
    }
}
Disassembly:
endianify:
mov eax, DWORD PTR [rdi]
bswap eax
mov DWORD PTR [rdi], eax
mov eax, DWORD PTR [rdi+4]
bswap eax
mov DWORD PTR [rdi+4], eax
mov eax, DWORD PTR [rdi+8]
bswap eax
mov DWORD PTR [rdi+8], eax
mov eax, DWORD PTR [rdi+12]
bswap eax
mov DWORD PTR [rdi+12], eax
mov eax, DWORD PTR [rdi+16]
bswap eax
mov DWORD PTR [rdi+16], eax
ret
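For context, here is a usage sketch of the function above (my addition; HEADER_LUMPS and the lump_t layout are placeholders standing in for the real Quake 2 definitions):

#include <stdint.h>
#include <string.h>

#define HEADER_LUMPS 19                                /* placeholder value */

typedef struct { int fileofs; int filelen; } lump_t;   /* placeholder layout */

typedef struct
{
    int ident;
    int version;
    lump_t lumps[HEADER_LUMPS];
} dheader_t;

void endianify(dheader_t* header);                     /* defined above */

/* Replaces both `header = *(dheader_t *)buf;` and the LittleLong loop. */
void load_header(const void *buf, dheader_t *header)
{
    memcpy(header, buf, sizeof *header);
    endianify(header);
}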

How to make gcc or clang use 64-bit/32-bit division instead of 128-bit/64-bit division when the dividend is 64-bit and the quotient is 32-bit?

Currently, from research and various attempts, I'm pretty sure that the only solution to this problem is to use assembly. I'm posting this question to show an existing problem, and maybe get attention from compiler developers, or get some hits from searches about similar problems.
If anything changes in the future, I will accept it as an answer.
This is a very related question for MSVC.
In x86_64 machines, it is faster to use div/idiv with a 32-bit operand than a 64-bit operand. When the dividend is 64-bit and the divisor is 32-bit, and when you know that the quotient will fit in 32 bits, you don't have to use the 64-bit div/idiv. You can split the 64-bit dividend into two 32-bit registers, and even with this overhead, performing a 32-bit div on two 32-bit registers will be faster than doing a 64-bit div with a full 64-bit register.
The compiler will produce a 64-bit div with this function, and that is correct, because for a 32-bit div, if the quotient of the division does not fit in 32 bits, a hardware exception occurs.
uint32_t div_c(uint64_t a, uint32_t b) {
    return a / b;
}
However, if the quotient is known to fit in 32 bits, doing a full 64-bit division is unnecessary. I used __builtin_unreachable to tell the compiler this, but it doesn't make a difference.
uint32_t div_c_ur(uint64_t a, uint32_t b) {
    uint64_t q = a / b;
    if (q >= 1ull << 32) __builtin_unreachable();
    return q;
}
For both div_c and div_c_ur, the output from gcc is,
mov rax, rdi
mov esi, esi
xor edx, edx
div rsi
ret
clang does an interesting optimization of checking the dividend size, but it still uses a 64-bit div when the dividend is 64-bit.
mov rax, rdi
mov ecx, esi
mov rdx, rdi
shr rdx, 32
je .LBB0_1
xor edx, edx
div rcx
ret
.LBB0_1:
xor edx, edx
div ecx
ret
I had to write straight in assembly to achieve what I want. I couldn't find any other way to do this.
__attribute__((naked, sysv_abi))
uint32_t div_asm(uint64_t, uint32_t) {__asm__(
    "mov eax, edi\n\t"
    "mov rdx, rdi\n\t"
    "shr rdx, 32\n\t"
    "div esi\n\t"
    "ret\n\t"
);}
Was it worth it? At least perf reports 49.47% overhead from div_c while 24.88% overhead from div_asm, so on my computer (Tiger Lake), div r32 is about 2 times faster than div r64.
This is the benchmark code.
#include <stdint.h>
#include <stdio.h>

__attribute__((noinline))
uint32_t div_c(uint64_t a, uint32_t b) {
    uint64_t q = a / b;
    if (q >= 1ull << 32) __builtin_unreachable();
    return q;
}

__attribute__((noinline, naked, sysv_abi))
uint32_t div_asm(uint64_t, uint32_t) {__asm__(
    "mov eax, edi\n\t"
    "mov rdx, rdi\n\t"
    "shr rdx, 32\n\t"
    "div esi\n\t"
    "ret\n\t"
);}

static uint64_t rdtscp() {
    uint32_t _;
    return __builtin_ia32_rdtscp(&_);
}

int main() {
#define n 500000000ll
    uint64_t c;

    c = rdtscp();
    for (int i = 1; i <= n; ++i) {
        volatile uint32_t _ = div_c(i + n * n, i + n);
    }
    printf(" c%15ul\n", rdtscp() - c);

    c = rdtscp();
    for (int i = 1; i <= n; ++i) {
        volatile uint32_t _ = div_asm(i + n * n, i + n);
    }
    printf("asm%15ul\n", rdtscp() - c);
}
Every idea in this answer is based on comments by Nate Eldredge, from which I discovered some of the power of gcc's extended inline assembly. Even though I still have to write assembly, it is possible to create a custom as-if intrinsic function.
static inline uint32_t divqd(uint64_t a, uint32_t b) {
    if (__builtin_constant_p(b)) {
        return a / b;
    }
    uint32_t lo = a;
    uint32_t hi = a >> 32;
    __asm__("div %2" : "+a" (lo), "+d" (hi) : "rm" (b));
    return lo;
}
__builtin_constant_p returns 1 if b can be evaluated at compile time. +a and +d mean the values are read from and written to the a and d registers (eax and edx). rm specifies that the input b can be either a register or a memory operand.
To see whether inlining and constant propagation are done smoothly:
uint32_t divqd_r(uint64_t a, uint32_t b) {
    return divqd(a, b);
}
divqd_r:
mov rdx, rdi
mov rax, rdi
shr rdx, 32
div esi
ret
uint32_t divqd_m(uint64_t a) {
    extern uint32_t b;
    return divqd(a, b);
}
divqd_m:
mov rdx, rdi
mov rax, rdi
shr rdx, 32
div DWORD PTR b[rip]
ret
uint32_t divqd_c(uint64_t a) {
    return divqd(a, 12345);
}
divqd_c:
movabs rdx, 6120523590596543007
mov rax, rdi
mul rdx
shr rdx, 12
mov eax, edx
ret
and the results are satisfying (https://godbolt.org/z/47PE4ovMM).

Division performance for a x32 ELF on a x64 OS

In the following example, running a 32-bit ELF on a 64-bit architecture is faster and I don't understand why. I tried two examples, one using a division and the other a multiplication. The multiplication performs as expected; however, the division performance is surprising.
We can see in the assembly that the compiler calls _alldiv, which emulates a 64-bit division on a 32-bit architecture, so it should be slower than simply using the idiv instruction. So I don't understand the results I got:
My setup is: Windows 10 x64, Visual Studio 2019
To time the code I use Measure-Command { .\out.exe }:
Multiplication
32-bit ELF: 3360 ms
64-bit ELF: 1469 ms
Division
32-bit ELF: 7383 ms
64-bit ELF: 8567 ms
Code
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <Windows.h>

volatile int64_t m = 32;
volatile int64_t n = 12;
volatile int64_t result;

int main(void)
{
    for (size_t i = 0; i < (1 << 30); i++)
    {
#ifdef DIVISION
        result = m / n;
#else
        result = m * n;
#endif
        m += 1;
        n += 3;
    }
}
64-bit disassembly (division)
for (size_t i = 0; i < (1 << 30); i++)
00007FF60DA81000 mov r8d,40000000h
00007FF60DA81006 nop word ptr [rax+rax]
{
result = m / n;
00007FF60DA81010 mov rcx,qword ptr [n (07FF60DA83038h)]
00007FF60DA81017 mov rax,qword ptr [m (07FF60DA83040h)]
00007FF60DA8101E cqo
00007FF60DA81020 idiv rax,rcx
00007FF60DA81023 mov qword ptr [result (07FF60DA83648h)],rax
m += 1;
00007FF60DA8102A mov rax,qword ptr [m (07FF60DA83040h)]
00007FF60DA81031 inc rax
00007FF60DA81034 mov qword ptr [m (07FF60DA83040h)],rax
n += 3;
00007FF60DA8103B mov rax,qword ptr [n (07FF60DA83038h)]
00007FF60DA81042 add rax,3
00007FF60DA81046 mov qword ptr [n (07FF60DA83038h)],rax
00007FF60DA8104D sub r8,1
00007FF60DA81051 jne main+10h (07FF60DA81010h)
}
}
32-bit disassembly (division)
for (size_t i = 0; i < (1 << 30); i++)
00A41002 mov edi,40000000h
00A41007 nop word ptr [eax+eax]
{
result = m / n;
00A41010 mov edx,dword ptr [n (0A43018h)]
00A41016 mov eax,dword ptr ds:[00A4301Ch]
00A4101B mov esi,dword ptr [m (0A43020h)]
00A41021 mov ecx,dword ptr ds:[0A43024h]
00A41027 push eax
00A41028 push edx
00A41029 push ecx
00A4102A push esi
00A4102B call _alldiv (0A41CD0h)
00A41030 mov dword ptr [result (0A433A0h)],eax
00A41035 mov dword ptr ds:[0A433A4h],edx
m += 1;
00A4103B mov eax,dword ptr [m (0A43020h)]
00A41040 mov ecx,dword ptr ds:[0A43024h]
00A41046 add eax,1
00A41049 mov dword ptr [m (0A43020h)],eax
00A4104E adc ecx,0
00A41051 mov dword ptr ds:[0A43024h],ecx
n += 3;
00A41057 mov eax,dword ptr [n (0A43018h)]
00A4105C mov ecx,dword ptr ds:[0A4301Ch]
00A41062 add eax,3
00A41065 mov dword ptr [n (0A43018h)],eax
00A4106A adc ecx,0
00A4106D mov dword ptr ds:[0A4301Ch],ecx
00A41073 sub edi,1
00A41076 jne main+10h (0A41010h)
}
}
Edit
To investigate further, as Chris Dodd suggested, I slightly modified my code as follows:
volatile int64_t m = 32000000000;
volatile int64_t n = 12000000000;
volatile int64_t result;
This time I have these results:
Division
32-bit ELF: 22407 ms
64-bit ELF: 17812 ms
If you look at instruction timings for x86 processors, it turns out that on recent Intel processors, a 64-bit divide is 3-4x as expensive as a 32-bit divide -- and if you look at the internals of alldiv (link in the comments above), for your values which will always fit in 32 bits, it will use a single 32-bit divide...
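A rough model of that fast path in C (an assumption based on the answer's description of _alldiv, not the actual runtime source): when both operands happen to fit in 32 bits, a single 32-bit divide is enough, and that is the case these benchmark values always hit.

#include <stdint.h>

static int64_t div64_sketch(int64_t a, int64_t b)
{
    if (a >= INT32_MIN && a <= INT32_MAX &&
        b >= INT32_MIN && b <= INT32_MAX)
        return (int32_t)a / (int32_t)b;   /* one 32-bit idiv */

    return a / b;                         /* full 64-bit path (expensive) */
}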

Arithmetic operations with large numbers in assembly

I got a task to write an assembly routine that can be called from C and is declared as follows:
extern int solve_equation(long int a, long int b,long int c, long int *x, long int *y);
that finds a solution to the equation
a * x + b * y = c
with -2147483648 < x, y < 2147483647, by checking all options.
The value returned from the routine is 1 if a solution is found and 0 otherwise.
You must take into consideration that the results of the calculations a * x, b * y, and a * x + b * y can exceed 32 bits.
.MODEL SMALL
.DATA
C DQ ?
SUM DQ 0
MUL1 DQ ?
MUL2 DQ ?
X DD ?
Y DD ?
.CODE
.386
PUBLIC _solve_equation
_solve_equation PROC NEAR
PUSH BP
MOV BP,SP
PUSH SI
MOV X,-2147483648
MOV Y,-2147483648
MOV ECX,4294967295
FOR1:
CMP ECX,0
JE FALSE
PUSH ECX
MOV Y,-2147483648
MOV ECX,4294967295
FOR2:
MOV SUM,0
CMP ECX,0
JE SET_FOR1
MOV EAX,DWORD PTR [BP+4]
IMUL X
MOV DWORD PTR MUL1,EAX
MOV DWORD PTR MUL1+4,EDX
MOV EAX,DWORD PTR [BP+8]
IMUL Y
MOV DWORD PTR MUL2,EAX
MOV DWORD PTR MUL2+4,EDX
MOV EAX, DWORD PTR MUL1
ADD DWORD PTR SUM,EAX
MOV EAX, DWORD PTR MUL2
ADD DWORD PTR SUM,EAX
MOV EAX, DWORD PTR MUL1+4
ADD DWORD PTR SUM+4,EAX
MOV EAX, DWORD PTR MUL2+4
ADD DWORD PTR SUM+4,EAX
CMP SUM,-2147483648
JL SET_FOR2
CMP SUM,2147483647
JG SET_FOR2
MOV EAX,DWORD PTR [BP+12]
CMP DWORD PTR SUM,EAX
JE TRUE
SET_FOR2:
DEC ECX
INC Y
JMP FOR2
SET_FOR1:
POP ECX
DEC ECX
JMP FOR1
FALSE:
MOV AX,0
JMP SOF
TRUE:
MOV SI,WORD PTR [BP+16]
MOV EAX,X
MOV DWORD PTR [SI],EAX
MOV SI,WORD PTR [BP+18]
MOV EAX,Y
MOV DWORD PTR [SI],EAX
MOV AX,1
SOF:
POP SI
POP BP
RET
_solve_equation ENDP
END
Is this the right way to work with large numbers?
I get "argument to operation or instruction has illegal size" when I try to do:
MOV SUM,0
CMP SUM,-2147483648
CMP SUM,2147483647
main code:
int main()
{
    long int x, y, flag;

    flag = solve_equation(-5,4,2147483647,&x, &y);
    if (flag == 1)
        printf("%ld*%ld + %ld*%ld = %ld\n", -5L,x,4L,y,2147483647);
    return 0;
}
output
-5*-2147483647 + 4*-2147483647 = 2147483647
I'm using DOSBox 0.74 and TCC.
You're using 16-bit code, so 64-bit operand-size isn't available. Your assembler magically associates a size with sum, because you defined it with sum dq 0.
So mov sum, 0 is equivalent to mov qword ptr [sum], 0, which of course won't assemble in 16 or 32-bit mode; you can only operate on up to 32 bits at once with integer operations.
(32-bit operand-size is available in 16-bit mode on 386-compatible CPUs, using the same machine encodings that allows 16-bit operand size in 32-bit mode. But 64-bit operand size is only available in 64-bit mode. Unlike 386, AMD64 didn't add any new prefixes or anything to previously-existing modes, for various reasons.)
You could zero the whole 64-bit sum with an SSE store, or even compare with SSE4.2 pcmpgtq, but that's probably not what you want.
It looks like you want to check if 64-bit sum fits in 32 bits. (i.e. if it is a sign-extended 32-bit integer).
So really you just need to check that all 32 high bits are the same and match bit 31 of the low half.
mov eax, dword ptr [sum]
cdq ; sign extend eax into edx:eax
; i.e. copy bit 31 of EAX to all bits of EDX
cmp edx, dword ptr [sum+4]
je small_sum
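The same check written in C, for comparison (a sketch; a compiler is likely to emit a cdq/cmp pair like the one above for it):

#include <stdint.h>

/* A 64-bit value fits in a signed 32-bit integer iff sign-extending its
 * low 32 bits reproduces the original value. */
static int fits_in_int32(int64_t sum)
{
    return sum == (int32_t)sum;
}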

Assembly how to translate JNE to C Code without ZF flag access

My ASM-to-C-code emulation is nearly done; I'm just trying to solve these second-pass problems.
Let's say I have this ASM function:
401040 MOV EAX,DWORD PTR [ESP+8]
401044 MOV EDX,DWORD PTR [ESP+4]
401048 PUSH ESI
401049 MOV ESI,ECX
40104B MOV ECX,EAX
40104D DEC EAX
40104E TEST ECX,ECX
401050 JE 401083
401052 PUSH EBX
401053 PUSH EDI
401054 LEA EDI,[EAX+1]
401057 MOV AX,WORD PTR [ESI]
40105A XOR EBX,EBX
40105C MOV BL,BYTE PTR [EDX]
40105E MOV ECX,EAX
401060 AND ECX,FFFF
401066 SHR ECX,8
401069 XOR ECX,EBX
40106B XOR EBX,EBX
40106D MOV BH,AL
40106F MOV AX,WORD PTR [ECX*2+45F81C]
401077 XOR AX,BX
40107A INC EDX
40107B DEC EDI
40107C MOV WORD PTR [ESI],AX
40107F JNE 401057
401081 POP EDI
401082 POP EBX
401083 POP ESI
401084 RET 8
My program would create the following for it.
int Func_401040() {
    regs.d.eax = *(unsigned int *)(regs.d.esp+0x00000008);
    regs.d.edx = *(unsigned int *)(regs.d.esp+0x00000004);
    regs.d.esp -= 4;
    *(unsigned int *)(regs.d.esp) = regs.d.esi;
    regs.d.esi = regs.d.ecx;
    regs.d.ecx = regs.d.eax;
    regs.d.eax--;
    if(regs.d.ecx == 0)
        goto label_401083;
    regs.d.esp -= 4;
    *(unsigned int *)(regs.d.esp) = regs.d.ebx;
    regs.d.esp -= 4;
    *(unsigned int *)(regs.d.esp) = regs.d.edi;
    regs.d.edi = (regs.d.eax+0x00000001);
    regs.x.ax = *(unsigned short *)(regs.d.esi);
    regs.d.ebx ^= regs.d.ebx;
    regs.h.bl = *(unsigned char *)(regs.d.edx);
    regs.d.ecx = regs.d.eax;
    regs.d.ecx &= 0x0000FFFF;
    regs.d.ecx >>= 0x00000008;
    regs.d.ecx ^= regs.d.ebx;
    regs.d.ebx ^= regs.d.ebx;
    regs.h.bh = regs.h.al;
    regs.x.ax = *(unsigned short *)(regs.d.ecx*0x00000002+0x0045F81C);
    regs.x.ax ^= regs.x.bx;
    regs.d.edx++;
    regs.d.edi--;
    *(unsigned short *)(regs.d.esi) = regs.x.ax;
    JNE 401057
    regs.d.edi = *(unsigned int *)(regs.d.esp);
    regs.d.esp += 4;
    regs.d.ebx = *(unsigned int *)(regs.d.esp);
    regs.d.esp += 4;
label_401083:
    regs.d.esi = *(unsigned int *)(regs.d.esp);
    regs.d.esp += 4;
    return 0x8;
}
Since JNE 401057 doesn't follow a CMP or TEST,
how do I translate it into C code?
The most recent instruction that modified flags is the dec, which sets ZF when its operand hits 0. So the jne is about equivalent to if (regs.d.edi != 0) goto label_401057;.
BTW: ret 8 isn't equivalent to return 8. The ret instruction's operand is the number of bytes to add to ESP when returning. (It's commonly used to clean up the stack.) It'd be kinda like
return eax;
regs.d.esp += 8;
except that semi-obviously, this won't work in C -- the return makes any code after it unreachable.
This is actually a part of the calling convention -- [ESP+4] and [ESP+8] are arguments passed to the function, and the ret is cleaning those up. This isn't the usual C calling convention; it looks more like fastcall or thiscall, considering the function expects a value in ECX.
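As a self-contained illustration of that DEC/JNE translation (hypothetical values, not the question's emulator code), the pair maps naturally onto a post-tested loop on the decremented variable:

#include <stdio.h>

int main(void)
{
    unsigned int edi = 5;          /* stands in for regs.d.edi */
    do {
        /* loop body would go here */
        edi--;                     /* DEC EDI: sets ZF when EDI reaches 0 */
    } while (edi != 0);            /* JNE loop_start: taken while ZF is clear */
    printf("looped until EDI hit zero\n");
    return 0;
}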
