author     Richard Henderson <richard.henderson@linaro.org>    2022-11-10 16:07:04 +1000
committer  Richard Henderson <richard.henderson@linaro.org>    2023-02-04 06:19:42 -1000
commit     d1beee4da1dbbc0ce1bc42b38752366eed4babec (patch)
tree       25847566e044b980acb4d407160d8423f1429dce /tcg/tcg-op.c
parent     123ae5683c9e7815857304fd2f21664621c90a13 (diff)
tcg: Split out tcg_gen_nonatomic_cmpxchg_i{32,64}
Normally this is automatically handled by the CF_PARALLEL checks
within tcg_gen_atomic_cmpxchg_i{32,64}, but x86 has a special
case, !PREFIX_LOCK, where it always wants the non-atomic version.
Split these out so that x86 does not have to roll its own.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
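[Editor's note: a minimal sketch of the kind of caller this enables, assuming a translator that already knows whether a LOCK prefix was seen. The function and parameter names (gen_cmpxchg_sketch, prefix_lock, oldv, mem_index, ot) are illustrative, not code from the follow-up x86 patch:]

/*
 * Hypothetical sketch, not the actual target/i386 change: with the
 * non-atomic entry point split out, an x86-style frontend can choose
 * the expansion from the LOCK prefix alone, instead of relying on the
 * CF_PARALLEL check inside tcg_gen_atomic_cmpxchg_i32().
 */
static void gen_cmpxchg_sketch(bool prefix_lock, TCGv_i32 oldv, TCGv addr,
                               TCGv_i32 cmpv, TCGv_i32 newv,
                               TCGArg mem_index, MemOp ot)
{
    if (prefix_lock) {
        /* LOCK CMPXCHG: atomicity required; CF_PARALLEL handled inside. */
        tcg_gen_atomic_cmpxchg_i32(oldv, addr, cmpv, newv, mem_index, ot);
    } else {
        /* Plain CMPXCHG: always the non-atomic load/movcond/store form. */
        tcg_gen_nonatomic_cmpxchg_i32(oldv, addr, cmpv, newv, mem_index, ot);
    }
}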
Diffstat (limited to 'tcg/tcg-op.c')
-rw-r--r--   tcg/tcg-op.c   136
1 file changed, 88 insertions(+), 48 deletions(-)
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 5811ecd3e7..c581ae77c4 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -3325,82 +3325,122 @@ static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
     WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
 };
 
+void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
+                                   TCGv_i32 newv, TCGArg idx, MemOp memop)
+{
+    TCGv_i32 t1 = tcg_temp_new_i32();
+    TCGv_i32 t2 = tcg_temp_new_i32();
+
+    tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
+
+    tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
+    tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
+    tcg_gen_qemu_st_i32(t2, addr, idx, memop);
+    tcg_temp_free_i32(t2);
+
+    if (memop & MO_SIGN) {
+        tcg_gen_ext_i32(retv, t1, memop);
+    } else {
+        tcg_gen_mov_i32(retv, t1);
+    }
+    tcg_temp_free_i32(t1);
+}
+
 void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                 TCGv_i32 newv, TCGArg idx, MemOp memop)
 {
-    memop = tcg_canonicalize_memop(memop, 0, 0);
+    gen_atomic_cx_i32 gen;
+    MemOpIdx oi;
 
     if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
-        TCGv_i32 t1 = tcg_temp_new_i32();
-        TCGv_i32 t2 = tcg_temp_new_i32();
+        tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop);
+        return;
+    }
 
-        tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
+    memop = tcg_canonicalize_memop(memop, 0, 0);
+    gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
+    tcg_debug_assert(gen != NULL);
 
-        tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
-        tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
-        tcg_gen_qemu_st_i32(t2, addr, idx, memop);
-        tcg_temp_free_i32(t2);
+    oi = make_memop_idx(memop & ~MO_SIGN, idx);
+    gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+
+    if (memop & MO_SIGN) {
+        tcg_gen_ext_i32(retv, retv, memop);
+    }
+}
+
+void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
+                                   TCGv_i64 newv, TCGArg idx, MemOp memop)
+{
+    TCGv_i64 t1, t2;
 
+    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
+        tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
+                                      TCGV_LOW(newv), idx, memop);
         if (memop & MO_SIGN) {
-            tcg_gen_ext_i32(retv, t1, memop);
+            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
         } else {
-            tcg_gen_mov_i32(retv, t1);
+            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
         }
-        tcg_temp_free_i32(t1);
-    } else {
-        gen_atomic_cx_i32 gen;
-        MemOpIdx oi;
+        return;
+    }
 
-        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
-        tcg_debug_assert(gen != NULL);
+    t1 = tcg_temp_new_i64();
+    t2 = tcg_temp_new_i64();
 
-        oi = make_memop_idx(memop & ~MO_SIGN, idx);
-        gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+    tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
 
-        if (memop & MO_SIGN) {
-            tcg_gen_ext_i32(retv, retv, memop);
-        }
+    tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
+    tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
+    tcg_gen_qemu_st_i64(t2, addr, idx, memop);
+    tcg_temp_free_i64(t2);
+
+    if (memop & MO_SIGN) {
+        tcg_gen_ext_i64(retv, t1, memop);
+    } else {
+        tcg_gen_mov_i64(retv, t1);
     }
+    tcg_temp_free_i64(t1);
 }
 
 void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
                                 TCGv_i64 newv, TCGArg idx, MemOp memop)
 {
-    memop = tcg_canonicalize_memop(memop, 1, 0);
-
     if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
-        TCGv_i64 t1 = tcg_temp_new_i64();
-        TCGv_i64 t2 = tcg_temp_new_i64();
-
-        tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
-
-        tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
-        tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
-        tcg_gen_qemu_st_i64(t2, addr, idx, memop);
-        tcg_temp_free_i64(t2);
+        tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop);
+        return;
+    }
 
-        if (memop & MO_SIGN) {
-            tcg_gen_ext_i64(retv, t1, memop);
-        } else {
-            tcg_gen_mov_i64(retv, t1);
-        }
-        tcg_temp_free_i64(t1);
-    } else if ((memop & MO_SIZE) == MO_64) {
-#ifdef CONFIG_ATOMIC64
+    if ((memop & MO_SIZE) == MO_64) {
         gen_atomic_cx_i64 gen;
-        MemOpIdx oi;
 
+        memop = tcg_canonicalize_memop(memop, 1, 0);
         gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
-        tcg_debug_assert(gen != NULL);
+        if (gen) {
+            MemOpIdx oi = make_memop_idx(memop, idx);
+            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+            return;
+        }
 
-        oi = make_memop_idx(memop, idx);
-        gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
-#else
         gen_helper_exit_atomic(cpu_env);
-        /* Produce a result, so that we have a well-formed opcode stream
-           with respect to uses of the result in the (dead) code following.  */
+
+        /*
+         * Produce a result for a well-formed opcode stream.  This satisfies
+         * liveness for set before used, which happens before this dead code
+         * is removed.
+         */
         tcg_gen_movi_i64(retv, 0);
-#endif /* CONFIG_ATOMIC64 */
+        return;
+    }
+
+    if (TCG_TARGET_REG_BITS == 32) {
+        tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
+                                   TCGV_LOW(newv), idx, memop);
+        if (memop & MO_SIGN) {
+            tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
+        } else {
+            tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
+        }
     } else {
        TCGv_i32 c32 = tcg_temp_new_i32();
        TCGv_i32 n32 = tcg_temp_new_i32();
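[Editor's note: the non-atomic expansion above is just a load, a conditional select, and an unconditional store. Written as plain C it computes the following; this is an illustrative model only, nonatomic_cmpxchg32_model is a hypothetical name, and the real ops act on guest memory through the softmmu TLB rather than a host pointer:]

#include <stdint.h>

/*
 * Plain-C model of tcg_gen_nonatomic_cmpxchg_i32: no atomicity guarantee,
 * and the store always happens, writing back the old value on mismatch.
 */
static uint32_t nonatomic_cmpxchg32_model(uint32_t *mem, uint32_t cmpv,
                                          uint32_t newv)
{
    uint32_t old = *mem;                          /* tcg_gen_qemu_ld_i32 */
    uint32_t store = (old == cmpv) ? newv : old;  /* tcg_gen_movcond_i32 */
    *mem = store;                                 /* tcg_gen_qemu_st_i32 */
    return old;                 /* retv, sign-extended when MO_SIGN is set */
}

[The unconditional store mirrors x86's CMPXCHG, which writes its destination even when the comparison fails. Note also how the CONFIG_ATOMIC64 #ifdef in the i64 path becomes a runtime if (gen) check: when 64-bit atomic helpers are unavailable the table entry is NULL and the code falls through to gen_helper_exit_atomic(). On a 32-bit host, the i64 variants reuse the i32 path for sub-64-bit sizes and fill TCGV_HIGH(retv) by sign- or zero-extension.]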