about summary refs log tree commit diff
path: root/tcg/tcg-op.c
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2022-11-07 10:42:56 +1100
committerRichard Henderson <richard.henderson@linaro.org>2023-05-16 16:30:25 -0700
commit12fde9bcdb52118495d10c32ed375679f23e323c (patch)
treeaf2fc03fc6bf402cad84ec11be6799fd0a373f8f /tcg/tcg-op.c
parent7b8801071951c55dc506c1fca8b40ba292a28d6e (diff)
tcg: Add INDEX_op_qemu_{ld,st}_i128
Add opcodes for backend support for 128-bit memory operations.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg/tcg-op.c')
-rw-r--r--tcg/tcg-op.c69
1 file changed, 63 insertions(+), 6 deletions(-)
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index b13ded10df..c419228cc4 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -3205,7 +3205,7 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
{
- MemOpIdx oi = make_memop_idx(memop, idx);
+ const MemOpIdx oi = make_memop_idx(memop, idx);
tcg_debug_assert((memop & MO_SIZE) == MO_128);
tcg_debug_assert((memop & MO_SIGN) == 0);
@@ -3213,9 +3213,36 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
addr = plugin_prep_mem_callbacks(addr);
- /* TODO: allow the tcg backend to see the whole operation. */
+ /* TODO: For now, force 32-bit hosts to use the helper. */
+ if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+ TCGv_i64 lo, hi;
+ TCGArg addr_arg;
+ MemOpIdx adj_oi;
+ bool need_bswap = false;
+
+ if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
+ lo = TCGV128_HIGH(val);
+ hi = TCGV128_LOW(val);
+ adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx);
+ need_bswap = true;
+ } else {
+ lo = TCGV128_LOW(val);
+ hi = TCGV128_HIGH(val);
+ adj_oi = oi;
+ }
+
+#if TARGET_LONG_BITS == 32
+ addr_arg = tcgv_i32_arg(addr);
+#else
+ addr_arg = tcgv_i64_arg(addr);
+#endif
+ tcg_gen_op4ii_i64(INDEX_op_qemu_ld_i128, lo, hi, addr_arg, adj_oi);
- if (use_two_i64_for_i128(memop)) {
+ if (need_bswap) {
+ tcg_gen_bswap64_i64(lo, lo);
+ tcg_gen_bswap64_i64(hi, hi);
+ }
+ } else if (use_two_i64_for_i128(memop)) {
MemOp mop[2];
TCGv addr_p8;
TCGv_i64 x, y;
@@ -3258,7 +3285,7 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
{
- MemOpIdx oi = make_memop_idx(memop, idx);
+ const MemOpIdx oi = make_memop_idx(memop, idx);
tcg_debug_assert((memop & MO_SIZE) == MO_128);
tcg_debug_assert((memop & MO_SIGN) == 0);
@@ -3266,9 +3293,39 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
addr = plugin_prep_mem_callbacks(addr);
- /* TODO: allow the tcg backend to see the whole operation. */
+ /* TODO: For now, force 32-bit hosts to use the helper. */
+
+ if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+ TCGv_i64 lo, hi;
+ TCGArg addr_arg;
+ MemOpIdx adj_oi;
+ bool need_bswap = false;
+
+ if ((memop & MO_BSWAP) && !tcg_target_has_memory_bswap(memop)) {
+ lo = tcg_temp_new_i64();
+ hi = tcg_temp_new_i64();
+ tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
+ tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
+ adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx);
+ need_bswap = true;
+ } else {
+ lo = TCGV128_LOW(val);
+ hi = TCGV128_HIGH(val);
+ adj_oi = oi;
+ }
+
+#if TARGET_LONG_BITS == 32
+ addr_arg = tcgv_i32_arg(addr);
+#else
+ addr_arg = tcgv_i64_arg(addr);
+#endif
+ tcg_gen_op4ii_i64(INDEX_op_qemu_st_i128, lo, hi, addr_arg, adj_oi);
- if (use_two_i64_for_i128(memop)) {
+ if (need_bswap) {
+ tcg_temp_free_i64(lo);
+ tcg_temp_free_i64(hi);
+ }
+ } else if (use_two_i64_for_i128(memop)) {
MemOp mop[2];
TCGv addr_p8;
TCGv_i64 x, y;