@@ -8,27 +8,6 @@ int umult64(uint64 a,uint64 b,uint64* c) { return !__builtin_mul_overflow(a,b,c)
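/* For reference: the one-liner in the hunk context above relies on
   __builtin_mul_overflow (GCC 5+, Clang), which stores a*b in *c and
   returns nonzero iff the product overflowed; negating it gives the
   1-on-success, 0-on-overflow convention this file uses. A
   self-contained sketch, with a stand-in typedef for libowfat's
   uint64: */
#ifdef UMULT64_BUILTIN_SKETCH   /* hypothetical guard, not in the file */
typedef unsigned long long uint64;
int umult64(uint64 a, uint64 b, uint64* c) {
  return !__builtin_mul_overflow(a, b, c);
}
#endif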
|
#include "haveuint128.h"

#if defined(__x86_64__) && defined(__OPTIMIZE__)

/* WARNING: this only works if compiled with -fomit-frame-pointer */
void umult64() {
  asm volatile(
    "xchgq %rdx,%rsi\n"     /* rsi = result pointer c, rdx = b */
    "movq %rdi,%rax\n"      /* rax = a */
    "mulq %rdx\n"           /* rdx:rax = a*b; carry set on overflow */
    "jc 1f\n"               /* overflow */
    "movq %rax,(%rsi)\n"    /* store the low 64 bits in *c */
    "xorq %rax,%rax\n"
    "inc %rax\n"            /* return 1: no overflow */
    "ret\n"
    "1:\n"
    "xorq %rax,%rax\n"      /* return 0: overflow */
    /* the closing ret is generated by gcc */
  );
}
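/* Sketch of why the asm works (explanatory, with a hypothetical
   caller): under the SysV AMD64 ABI the arguments arrive as a in
   %rdi, b in %rsi and c in %rdx, and the int result is returned in
   %rax, so with -fomit-frame-pointer there is no prologue to undo
   before the early "ret". Assuming the int umult64(uint64,uint64,
   uint64*) prototype from safemult.h: */
#ifdef UMULT64_USAGE_SKETCH     /* hypothetical guard, not in the file */
#include "safemult.h"
int umult64_demo(void) {        /* hypothetical helper name */
  uint64 r;
  int ok = umult64(0x100000000ull, 2, &r);              /* 1; r == 1ull<<33 */
  int ov = umult64(0x100000000ull, 0x100000000ull, &r); /* 0; 2^64 overflows */
  return ok && !ov;
}
#endif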
|
#else

#include "safemult.h"

#if defined(HAVE_UINT128)

@@ -68,5 +47,3 @@ int umult64(uint64 a,uint64 b,uint64* c) {
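/* The body of the HAVE_UINT128 branch is elided by the hunk above.
   As a hedged sketch, a __uint128_t-based check typically looks like
   this (assumed shape, not the file's verbatim lines): */
#ifdef UMULT64_U128_SKETCH      /* hypothetical guard, not in the file */
int umult64_u128(uint64 a, uint64 b, uint64* c) {  /* hypothetical name */
  __uint128_t x = (__uint128_t)a * b;       /* full 128-bit product */
  if (x > 0xffffffffffffffffull) return 0;  /* high half set: overflow */
  *c = (uint64)x;
  return 1;
}
#endif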
|
#endif

#endif

#endif