| author | Richard Henderson <rth@twiddle.net> | 2013-02-16 12:46:59 -0800 |
|---|---|---|
| committer | Blue Swirl <blauwirbel@gmail.com> | 2013-02-17 14:28:57 +0000 |
| commit | f540166b7dfdf4ec2057ac322d8cbfd0691e1d65 (patch) | |
| tree | 9ed4b3eed2a58ad4c14ec8e728fedf70bbb4d905 /include | |
| parent | be96bd3fbffde908a392c830c856063e122791c1 (diff) | |
host-utils: Use __int128_t for mul[us]64
Replace some x86_64-specific inline assembly with something that
all 64-bit hosts ought to optimize well. At worst this becomes
a call to the gcc __multi3 routine, which is no worse than our
implementation in util/host-utils.c.
With gcc 4.7, we get identical code generation for x86_64. We
now get native multiplication on ia64 and s390x hosts. With minor
improvements to gcc we can get it for ppc64 as well.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
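To make the commit message's point concrete, here is a minimal standalone sketch of the technique (not QEMU's header; `demo_mulu64` is an illustrative name, and it assumes a 64-bit GCC/Clang host with `__uint128_t` support). Where the host has a native widening multiply the cast-and-multiply compiles to that instruction; elsewhere GCC lowers it to a `__multi3` libcall, as the message notes.

```c
/* Illustrative sketch only -- demo_mulu64 is a hypothetical name,
 * not QEMU's mulu64.  Assumes a 64-bit GCC/Clang host that
 * provides __uint128_t. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void demo_mulu64(uint64_t *plow, uint64_t *phigh,
                        uint64_t a, uint64_t b)
{
    /* On x86_64 this compiles to a single MUL; hosts without a
     * native 64x64->128 multiply fall back to libgcc's __multi3. */
    __uint128_t r = (__uint128_t)a * b;
    *plow = (uint64_t)r;
    *phigh = (uint64_t)(r >> 64);
}

int main(void)
{
    uint64_t lo, hi;
    demo_mulu64(&lo, &hi, UINT64_MAX, UINT64_MAX);
    /* (2^64 - 1)^2 = 2^128 - 2^65 + 1, so:
     * hi = 0xfffffffffffffffe, lo = 0x0000000000000001 */
    printf("hi=%016" PRIx64 " lo=%016" PRIx64 "\n", hi, lo);
    return 0;
}
```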
Diffstat (limited to 'include')
| -rw-r--r-- | include/qemu/host-utils.h | 17 |

1 file changed, 8 insertions(+), 9 deletions(-)
```diff
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index f0dd850e1f..0f688c1c00 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -28,22 +28,21 @@
 #include "qemu/compiler.h"   /* QEMU_GNUC_PREREQ */
 #include <limits.h>
 
-#if defined(__x86_64__)
-#define __HAVE_FAST_MULU64__
+#ifdef CONFIG_INT128
 static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                           uint64_t a, uint64_t b)
 {
-    __asm__ ("mul %0\n\t"
-             : "=d" (*phigh), "=a" (*plow)
-             : "a" (a), "0" (b));
+    __uint128_t r = (__uint128_t)a * b;
+    *plow = r;
+    *phigh = r >> 64;
 }
-#define __HAVE_FAST_MULS64__
+
 static inline void muls64(uint64_t *plow, uint64_t *phigh,
                           int64_t a, int64_t b)
 {
-    __asm__ ("imul %0\n\t"
-             : "=d" (*phigh), "=a" (*plow)
-             : "a" (a), "0" (b));
+    __int128_t r = (__int128_t)a * b;
+    *plow = r;
+    *phigh = r >> 64;
 }
 #else
 void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b);
```
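For readers trying out the new paths, the following is a hedged, self-contained usage sketch: the function bodies copy the `CONFIG_INT128` branches above, but compiled outside QEMU. It illustrates why separate signed and unsigned variants exist at all: given the same input bit patterns, the low 64 bits of the product agree, while the high halves differ.

```c
/* Standalone copy of the new CONFIG_INT128 implementations, for
 * experimentation outside QEMU.  Assumes __uint128_t/__int128_t
 * support on the host compiler. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

int main(void)
{
    uint64_t lo, hi;

    muls64(&lo, &hi, -2, 3);           /* -6: hi is the sign extension,
                                        * 0xffffffffffffffff */
    printf("signed:   hi=%016" PRIx64 " lo=%016" PRIx64 "\n", hi, lo);

    mulu64(&lo, &hi, (uint64_t)-2, 3); /* same bit patterns: lo matches,
                                        * but hi is 2 */
    printf("unsigned: hi=%016" PRIx64 " lo=%016" PRIx64 "\n", hi, lo);
    return 0;
}
```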