Browse Source

another gcc 6 warning fix

compile on osx again
remove asm version of i?mult64 (was needed only for clang because it
didn't have __uint128_t, but it now has it)
master
Felix von Leitner 4 years ago
parent
commit
069f5c22c1
4 changed files with 18 additions and 51 deletions
  1. +12
    -4
      fmt/fmt_strn.c
  2. +6
    -0
      fmt/fmt_varint.c
  3. +0
    -24
      mult/imult64.c
  4. +0
    -23
      mult/umult64.c

+ 12
- 4
fmt/fmt_strn.c View File

@@ -5,10 +5,18 @@ size_t fmt_strn(char *out,const char *in,size_t limit) {
register const char* t=in;
register const char* u=in+limit;
for (;;) {
if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
if (!*t || t==u) break; if (s) { *s=*t; ++s; } ++t;
if (!*t || t==u) break;
if (s) { *s=*t; ++s; }
++t;
if (!*t || t==u) break;
if (s) { *s=*t; ++s; }
++t;
if (!*t || t==u) break;
if (s) { *s=*t; ++s; }
++t;
if (!*t || t==u) break;
if (s) { *s=*t; ++s; }
++t;
}
return (size_t)(t-in);
}

+ 6
- 0
fmt/fmt_varint.c View File

@@ -15,4 +15,10 @@ size_t fmt_varint(char* dest,unsigned long long l) {
return i;
}

/* fmt_pb_type0_int: encode l as a protobuf wire-type-0 (varint) integer
   into dest; encoding is byte-identical to fmt_varint, so on ELF targets
   we expose it as a zero-cost symbol alias.  Non-ELF object formats
   (e.g. Mach-O on OS X — see "compile on osx again" in the commit
   message) do not support __attribute__((alias)), so fall back to a
   plain wrapper call there. */
#ifdef __ELF__
size_t fmt_pb_type0_int(char* dest,unsigned long long l) __attribute__((alias("fmt_varint")));
#else
size_t fmt_pb_type0_int(char* dest,unsigned long long l) {
return fmt_varint(dest,l);
}
#endif

+ 0
- 24
mult/imult64.c View File

@@ -4,28 +4,6 @@

int imult64( int64 a, int64 b, int64* c) { return !__builtin_mul_overflow(a,b,c); }

#else

#if defined(__x86_64__) && defined(__OPTIMIZE__)

/* Hand-written System V AMD64 implementation of
   int imult64(int64 a, int64 b, int64* c):
   arguments arrive as a=%rdi, b=%rsi, c=%rdx; *c receives the low
   64 bits of the product and %rax carries the return value
   (1 = ok, 0 = overflow).
   NOTE(review): declared as void imult64() and relies on gcc emitting
   no prologue and supplying the trailing ret for the overflow path —
   hence the -fomit-frame-pointer warning below. */
/* WARNING: this only works if compiled with -fomit-frame-pointer */
void imult64() {
asm volatile(
"xchgq %rdx,%rsi\n" /* afterwards: %rdx = b, %rsi = c */
"movq %rdi,%rax\n"
"imulq %rdx\n" /* %rdx:%rax = a*b; CF/OF set when it does not fit in 64 bits */
"jc 1f\n" /* overflow */
"movq %rax,(%rsi)\n" /* *c = low 64 bits of product */
"xorq %rax,%rax\n"
"inc %rax\n" /* return 1 (success) */
"ret\n"
"1:\n"
"xorq %rax,%rax\n" /* return 0 (overflow) */
/* the closing ret is generated by gcc */
);
}


#else

#include "safemult.h"
@@ -54,5 +32,3 @@ int imult64(int64 a,int64 b,int64* c) {
#endif

#endif

#endif

+ 0
- 23
mult/umult64.c View File

@@ -8,27 +8,6 @@ int umult64(uint64 a,uint64 b,uint64* c) { return !__builtin_mul_overflow(a,b,c)

#include "haveuint128.h"

#if defined(__x86_64__) && defined(__OPTIMIZE__)

/* Hand-written System V AMD64 implementation of
   int umult64(uint64 a, uint64 b, uint64* c):
   arguments arrive as a=%rdi, b=%rsi, c=%rdx; *c receives the low
   64 bits of the unsigned product and %rax carries the return value
   (1 = ok, 0 = overflow).
   NOTE(review): declared as void umult64() and relies on gcc emitting
   no prologue and supplying the trailing ret for the overflow path —
   hence the -fomit-frame-pointer warning below. */
/* WARNING: this only works if compiled with -fomit-frame-pointer */
void umult64() {
asm volatile(
"xchgq %rdx,%rsi\n" /* afterwards: %rdx = b, %rsi = c */
"movq %rdi,%rax\n"
"mulq %rdx\n" /* %rdx:%rax = a*b (unsigned); CF/OF set when high half is nonzero */
"jc 1f\n" /* overflow */
"movq %rax,(%rsi)\n" /* *c = low 64 bits of product */
"xorq %rax,%rax\n"
"inc %rax\n" /* return 1 (success) */
"ret\n"
"1:\n"
"xorq %rax,%rax\n" /* return 0 (overflow) */
/* the closing ret is generated by gcc */
);
}

#else

#include "safemult.h"

#if defined(HAVE_UINT128)
@@ -68,5 +47,3 @@ int umult64(uint64 a,uint64 b,uint64* c) {
#endif

#endif

#endif

Loading…
Cancel
Save