author     Richard Henderson <richard.henderson@linaro.org>  2022-11-05 11:34:58 +0000
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-16 15:21:39 -0700
commit     e61f1efeb730fd64441131ea721086065904ff67
tree       c1c902b988700c06782fb573239a6cff0c0d3646
parent     35c653c4029794f67a523191941104fe12f2b22d
meson: Detect atomic128 support with optimization
There is an edge condition prior to gcc13 for which optimization
is required to generate 16-byte atomic sequences.  Detect this.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'meson.build')
-rw-r--r--  meson.build  54
1 file changed, 35 insertions(+), 19 deletions(-)
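Background: the 16-byte probe added below relies on __builtin_assume_aligned,
whose alignment promise is only propagated once the optimizers run; per the
gcc report cited in the patch (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389),
compilers prior to gcc13 may therefore fall back to out-of-line libatomic
calls at -O0 and inline the 16-byte atomic sequences only with optimization
enabled.  A standalone sketch of that probe (illustrative, not part of the
patch; exact behavior depends on host and compiler):

/* Sketch of the probe below, outside meson.  On an affected host,
 * "cc -O0 probe.c" is expected to fail to link (unresolved __atomic_*
 * libcalls, since meson's cc.links does not add -latomic), while
 * "cc -O1 probe.c" links with the 16-byte accesses inlined.  */
int main(int ac, char **av)
{
    /* Promise 16-byte alignment, which may be greater than
     * __alignof(unsigned __int128) on the host.  */
    unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], 16);

    p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
    __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
    __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return 0;
}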
diff --git a/meson.build b/meson.build
index 5e2807ea7c..4dddccb890 100644
--- a/meson.build
+++ b/meson.build
@@ -2259,23 +2259,21 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles('''
return printf("%zu", SIZE_MAX);
}''', args: ['-Werror']))
-atomic_test = '''
+# See if 64-bit atomic operations are supported.
+# Note that without __atomic builtins, we can only
+# assume atomic loads/stores max at pointer size.
+config_host_data.set('CONFIG_ATOMIC64', cc.links('''
#include <stdint.h>
int main(void)
{
- @0@ x = 0, y = 0;
+ uint64_t x = 0, y = 0;
y = __atomic_load_n(&x, __ATOMIC_RELAXED);
__atomic_store_n(&x, y, __ATOMIC_RELAXED);
__atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
__atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
__atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
return 0;
- }'''
-
-# See if 64-bit atomic operations are supported.
-# Note that without __atomic builtins, we can only
-# assume atomic loads/stores max at pointer size.
-config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t')))
+ }'''))
has_int128 = cc.links('''
__int128_t a;
@@ -2293,21 +2291,39 @@ if has_int128
# "do we have 128-bit atomics which are handled inline and specifically not
# via libatomic". The reason we can't use libatomic is documented in the
# comment starting "GCC is a house divided" in include/qemu/atomic128.h.
- has_atomic128 = cc.links(atomic_test.format('unsigned __int128'))
+ # We only care about these operations on 16-byte aligned pointers, so
+ # force 16-byte alignment of the pointer, which may be greater than
+ # __alignof(unsigned __int128) for the host.
+ atomic_test_128 = '''
+ int main(int ac, char **av) {
+ unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], 16);
+ p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
+ __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
+ __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return 0;
+ }'''
+ has_atomic128 = cc.links(atomic_test_128)
config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
if not has_atomic128
- has_cmpxchg128 = cc.links('''
- int main(void)
- {
- unsigned __int128 x = 0, y = 0;
- __sync_val_compare_and_swap_16(&x, y, x);
- return 0;
- }
- ''')
-
- config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128)
+ # Even with __builtin_assume_aligned, the above test may have failed
+ # without optimization enabled. Try again with optimizations locally
+ # enabled for the function. See
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128)
+ config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt)
+
+ if not has_atomic128_opt
+ config_host_data.set('CONFIG_CMPXCHG128', cc.links('''
+ int main(void)
+ {
+ unsigned __int128 x = 0, y = 0;
+ __sync_val_compare_and_swap_16(&x, y, x);
+ return 0;
+ }
+ '''))
+ endif
endif
endif
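
The three macros this produces (CONFIG_ATOMIC128, CONFIG_ATOMIC128_OPT,
CONFIG_CMPXCHG128) are consumed by include/qemu/atomic128.h, referenced in
the "GCC is a house divided" comment above.  The following is a minimal
sketch (not QEMU's actual code) of how a consumer might dispatch on them,
assuming the same builtins probed above; the function name atomic16_read is
used here purely for illustration:

/* Illustrative sketch only: dispatch on the macros defined above. */
#if defined(CONFIG_ATOMIC128_OPT)
/* The atomics only inline when the function is optimized; force that
 * locally, mirroring the probe's optimize("O1") attribute. */
# define ATOMIC128_ATTR __attribute__((optimize("O1")))
#else
# define ATOMIC128_ATTR
#endif

#if defined(CONFIG_ATOMIC128) || defined(CONFIG_ATOMIC128_OPT)
static inline ATOMIC128_ATTR unsigned __int128
atomic16_read(unsigned __int128 *ptr)
{
    return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
#elif defined(CONFIG_CMPXCHG128)
static inline unsigned __int128
atomic16_read(unsigned __int128 *ptr)
{
    /* A compare-and-swap of 0 against 0 returns the current value
     * atomically; note this requires the memory to be writable. */
    return __sync_val_compare_and_swap_16(ptr, 0, 0);
}
#endif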