author     Richard Henderson <richard.henderson@linaro.org>   2022-11-05 11:34:58 +0000
committer  Richard Henderson <richard.henderson@linaro.org>   2023-05-16 15:21:39 -0700
commit     e61f1efeb730fd64441131ea721086065904ff67
tree       c1c902b988700c06782fb573239a6cff0c0d3646
parent     35c653c4029794f67a523191941104fe12f2b22d
meson: Detect atomic128 support with optimization
There is an edge condition prior to GCC 13 for which optimization
is required to generate 16-byte atomic sequences. Detect this.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
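
For readers unfamiliar with the underlying compiler issue, here is a minimal standalone sketch of the technique the patch wires up (it is not part of the commit; the function name `load_atomic16_demo` and the test in `main` are invented for illustration): promise 16-byte alignment with `__builtin_assume_aligned`, and attach `optimize("O1")` so that GCC releases prior to 13 still inline the 16-byte atomic even in an unoptimized build.

```c
/*
 * Illustrative sketch of the commit's workaround (not QEMU code).
 * On a host whose ABI gives __int128 an alignment below 16 (e.g. s390x),
 * GCC < 13 at -O0 may ignore the alignment promise and emit a libatomic
 * call instead of an inline 16-byte atomic load; the optimize("O1")
 * attribute restores inlining for just this function.  On hosts where
 * GCC always outlines 16-byte atomics, the link may need -latomic,
 * which is exactly the case the new configure probe rejects.
 */
__attribute__((optimize("O1")))
static unsigned __int128 load_atomic16_demo(void *pv)
{
    /* Promise the compiler the pointer really is 16-byte aligned. */
    unsigned __int128 *p = __builtin_assume_aligned(pv, 16);
    return __atomic_load_n(p, __ATOMIC_RELAXED);
}

int main(void)
{
    static _Alignas(16) unsigned __int128 x = 42;
    return load_atomic16_demo(&x) == 42 ? 0 : 1;
}
```

Building this with `gcc -O0` on a pre-13 GCC shows the difference: without the attribute the 16-byte load may come out as a libatomic call, which is precisely the edge condition the meson change below detects.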
-rw-r--r-- | accel/tcg/ldst_atomicity.c.inc | 29
-rw-r--r-- | meson.build                    | 54
2 files changed, 60 insertions, 23 deletions
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index ce73b32def..ba5db7c366 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -16,6 +16,23 @@
 #endif
 #define HAVE_al8_fast      (ATOMIC_REG_SIZE >= 8)
 
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x.  We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled.  See
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+#  define ATTRIBUTE_ATOMIC128_OPT  __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
 #if defined(CONFIG_ATOMIC128)
 # define HAVE_al16_fast    true
 #else
@@ -152,7 +169,8 @@ static inline uint64_t load_atomic8(void *pv)
  *
  * Atomically load 16 aligned bytes from @pv.
  */
-static inline Int128 load_atomic16(void *pv)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+load_atomic16(void *pv)
 {
 #ifdef CONFIG_ATOMIC128
     __uint128_t *p = __builtin_assume_aligned(pv, 16);
@@ -356,7 +374,8 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
  * cross an 16-byte boundary then the access must be 16-byte atomic,
  * otherwise the access must be 8-byte atomic.
  */
-static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
 {
 #if defined(CONFIG_ATOMIC128)
     uintptr_t pi = (uintptr_t)pv;
@@ -692,7 +711,8 @@ static inline void store_atomic8(void *pv, uint64_t val)
  *
  * Atomically store 16 aligned bytes to @pv.
  */
-static inline void store_atomic16(void *pv, Int128Alias val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atomic16(void *pv, Int128Alias val)
 {
 #if defined(CONFIG_ATOMIC128)
     __uint128_t *pu = __builtin_assume_aligned(pv, 16);
@@ -790,7 +810,8 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
  *
  * Atomically store @val to @p masked by @msk.
  */
-static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
+static void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
 {
 #if defined(CONFIG_ATOMIC128)
     __uint128_t *pu, old, new;
diff --git a/meson.build b/meson.build
index 5e2807ea7c..4dddccb890 100644
--- a/meson.build
+++ b/meson.build
@@ -2259,23 +2259,21 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles('''
     return printf("%zu", SIZE_MAX);
 }''', args: ['-Werror']))
 
-atomic_test = '''
+# See if 64-bit atomic operations are supported.
+# Note that without __atomic builtins, we can only
+# assume atomic loads/stores max at pointer size.
+config_host_data.set('CONFIG_ATOMIC64', cc.links('''
   #include <stdint.h>
   int main(void)
   {
-    @0@ x = 0, y = 0;
+    uint64_t x = 0, y = 0;
     y = __atomic_load_n(&x, __ATOMIC_RELAXED);
     __atomic_store_n(&x, y, __ATOMIC_RELAXED);
     __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
     __atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
     __atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
     return 0;
-  }'''
-
-# See if 64-bit atomic operations are supported.
-# Note that without __atomic builtins, we can only
-# assume atomic loads/stores max at pointer size.
-config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t')))
+  }'''))
 
 has_int128 = cc.links('''
   __int128_t a;
@@ -2293,21 +2291,39 @@ if has_int128
   # "do we have 128-bit atomics which are handled inline and specifically not
   # via libatomic". The reason we can't use libatomic is documented in the
   # comment starting "GCC is a house divided" in include/qemu/atomic128.h.
-  has_atomic128 = cc.links(atomic_test.format('unsigned __int128'))
+  # We only care about these operations on 16-byte aligned pointers, so
+  # force 16-byte alignment of the pointer, which may be greater than
+  # __alignof(unsigned __int128) for the host.
+  atomic_test_128 = '''
+    int main(int ac, char **av) {
+      unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], sizeof(16));
+      p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
+      __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
+      __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+      return 0;
+    }'''
+  has_atomic128 = cc.links(atomic_test_128)
   config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
   if not has_atomic128
-    has_cmpxchg128 = cc.links('''
-      int main(void)
-      {
-        unsigned __int128 x = 0, y = 0;
-        __sync_val_compare_and_swap_16(&x, y, x);
-        return 0;
-      }
-    ''')
-
-    config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128)
+    # Even with __builtin_assume_aligned, the above test may have failed
+    # without optimization enabled.  Try again with optimizations locally
+    # enabled for the function.  See
+    #   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+    has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128)
+    config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt)
+
+    if not has_atomic128_opt
+      config_host_data.set('CONFIG_CMPXCHG128', cc.links('''
+        int main(void)
+        {
+          unsigned __int128 x = 0, y = 0;
+          __sync_val_compare_and_swap_16(&x, y, x);
+          return 0;
+        }
+      '''))
+    endif
   endif
 endif
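
For reference, the retry probe is built by plain string concatenation: `'__attribute__((optimize("O1")))' + atomic_test_128` hands `cc.links()` a translation unit equivalent to the sketch below (reformatted slightly for line length; compiling it by hand with `gcc probe.c`, i.e. with no `-O` flag, mimics the configure-time check):

```c
/* The CONFIG_ATOMIC128_OPT probe after meson's string concatenation:
 * the optimize("O1") attribute attaches to main().  The check passes
 * only if this links, which fails when the compiler falls back to
 * libatomic calls that meson does not link against. */
__attribute__((optimize("O1"))) int main(int ac, char **av) {
    unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], sizeof(16));
    p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
    __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
    __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return 0;
}
```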