-rw-r--r--  accel/tcg/ldst_atomicity.c.inc                 40
-rw-r--r--  host/include/generic/host/store-insert-al16.h  50
2 files changed, 51 insertions, 39 deletions
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index 6063395e11..2514899408 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -10,6 +10,7 @@
  */
 
 #include "host/load-extract-al16-al8.h"
+#include "host/store-insert-al16.h"
 
 #ifdef CONFIG_ATOMIC64
 # define HAVE_al8 true
@@ -682,45 +683,6 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
 }
 
 /**
- * store_atom_insert_al16:
- * @p: host address
- * @val: shifted value to store
- * @msk: mask for value to store
- *
- * Atomically store @val to @p masked by @msk.
- */
-static void ATTRIBUTE_ATOMIC128_OPT
-store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
-{
-#if defined(CONFIG_ATOMIC128)
-    __uint128_t *pu, old, new;
-
-    /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
-    pu = __builtin_assume_aligned(ps, 16);
-    old = *pu;
-    do {
-        new = (old & ~msk.u) | val.u;
-    } while (!__atomic_compare_exchange_n(pu, &old, new, true,
-                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
-#elif defined(CONFIG_CMPXCHG128)
-    __uint128_t *pu, old, new;
-
-    /*
-     * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
-     * defer to libatomic, so we must use __sync_*_compare_and_swap_16
-     * and accept the sequential consistency that comes with it.
-     */
-    pu = __builtin_assume_aligned(ps, 16);
-    do {
-        old = *pu;
-        new = (old & ~msk.u) | val.u;
-    } while (!__sync_bool_compare_and_swap_16(pu, old, new));
-#else
-    qemu_build_not_reached();
-#endif
-}
-
-/**
  * store_bytes_leN:
  * @pv: host address
  * @size: number of bytes to store
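
For context on the block removed above: store_atom_insert_al16() is an atomic read-modify-write of an aligned 16-byte word. The caller pre-shifts the value into position and supplies a mask with set bits over exactly the bytes being replaced; the compare-and-swap loop retries until it installs a word that differs from the old contents only under the mask. Below is a minimal, self-contained sketch of that pattern against plain __uint128_t, assuming a GCC/Clang host where a native 16-byte __atomic_compare_exchange_n is available (e.g. x86-64 built with -mcx16); store_u128_masked() is an illustrative name, not a QEMU function.

/*
 * Hedged sketch (not part of this patch): masked atomic insert via a
 * 16-byte compare-and-swap loop, mirroring the CONFIG_ATOMIC128 branch.
 */
#include <stdbool.h>
#include <stdint.h>

typedef unsigned __int128 u128;

/* Replace only the bits of *p selected by msk with the bits of val. */
static void store_u128_masked(u128 *p, u128 val, u128 msk)
{
    u128 old = __atomic_load_n(p, __ATOMIC_RELAXED);
    u128 new;

    do {
        /* Clear the masked field, then insert the (pre-shifted) value. */
        new = (old & ~msk) | (val & msk);
    } while (!__atomic_compare_exchange_n(p, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
    u128 word = 0;
    /* Overwrite only the low four bytes of the 16-byte word. */
    store_u128_masked(&word, 0x12345678u, 0xffffffffu);
    return (uint32_t)word == 0x12345678u ? 0 : 1;
}

The weak compare-exchange (fourth argument true) is acceptable because the loop retries anyway, and the relaxed ordering mirrors the CONFIG_ATOMIC128 branch above, which deliberately avoids extra memory barriers.
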
diff --git a/host/include/generic/host/store-insert-al16.h b/host/include/generic/host/store-insert-al16.h
new file mode 100644
index 0000000000..4a1662183d
--- /dev/null
+++ b/host/include/generic/host/store-insert-al16.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic store insert into 128-bit, generic version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef HOST_STORE_INSERT_AL16_H
+#define HOST_STORE_INSERT_AL16_H
+
+/**
+ * store_atom_insert_al16:
+ * @ps: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @ps masked by @msk.
+ */
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128 val, Int128 msk)
+{
+#if defined(CONFIG_ATOMIC128)
+    __uint128_t *pu;
+    Int128Alias old, new;
+
+    /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
+    pu = __builtin_assume_aligned(ps, 16);
+    old.u = *pu;
+    msk = int128_not(msk);
+    do {
+        new.s = int128_and(old.s, msk);
+        new.s = int128_or(new.s, val);
+    } while (!__atomic_compare_exchange_n(pu, &old.u, new.u, true,
+                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+#else
+    Int128 old, new, cmp;
+
+    ps = __builtin_assume_aligned(ps, 16);
+    old = *ps;
+    msk = int128_not(msk);
+    do {
+        cmp = old;
+        new = int128_and(old, msk);
+        new = int128_or(new, val);
+        old = atomic16_cmpxchg(ps, cmp, new);
+    } while (int128_ne(cmp, old));
+#endif
+}
+
+#endif /* HOST_STORE_INSERT_AL16_H */
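
The #else branch of the new header leans on QEMU's atomic16_cmpxchg(), a compare-and-swap that returns the value previously in memory. A hedged, self-contained sketch of that loop shape follows, with my_cmpxchg16() and insert_masked() as illustrative stand-ins; using __sync_val_compare_and_swap on a 16-byte type assumes the compiler and host support it (the CONFIG_CMPXCHG128 situation).

/*
 * Hedged sketch (not part of this patch): masked insert built on a
 * value-returning 16-byte compare-and-swap, mirroring the #else branch.
 */
typedef unsigned __int128 u128;

static u128 my_cmpxchg16(u128 *p, u128 cmp, u128 new)
{
    /* Returns the value found in *p; it equals cmp iff the swap happened. */
    return __sync_val_compare_and_swap(p, cmp, new);
}

static void insert_masked(u128 *p, u128 val, u128 msk)
{
    u128 old = *p, cmp, new;

    msk = ~msk;                          /* int128_not(msk) */
    do {
        cmp = old;
        new = (old & msk) | val;         /* int128_and + int128_or */
        old = my_cmpxchg16(p, cmp, new);
    } while (old != cmp);                /* int128_ne(cmp, old) */
}

int main(void)
{
    u128 word = ~(u128)0;
    /* Replace byte 5 with 0xab, leaving the other 15 bytes untouched. */
    insert_masked(&word, (u128)0xab << 40, (u128)0xff << 40);
    return ((word >> 40) & 0xff) == 0xab ? 0 : 1;
}

Looping on the returned value rather than on a success flag avoids an extra reload of *p after a failed attempt: the failed compare-and-swap already hands back the current contents, which is exactly what the header's Int128 version exploits via int128_ne().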