author | Alex Bennée <alex.bennee@linaro.org> | 2016-12-02 17:34:54 +0000
---|---|---
committer | Peter Maydell <peter.maydell@linaro.org> | 2016-12-05 17:52:01 +0000
commit | 5460da501a57cd72eda6fec736d76539122e2f99 (patch) |
tree | b521fd839b2458fcb8e255475f4e23a276fed4a9 /target-arm/translate-a64.c |
parent | bc66cedb4141fb7588f2462c74310d8fb5dd4cf1 (diff) |
target-arm/translate-a64: fix gen_load_exclusive
While testing rth's latest TCG patches with risu I found that ldaxp was
broken. Investigating further, I found it was broken by 1dd089d0, when
the cmpxchg atomic work was merged. As part of that change the code
attempted to be clever by doing a single 64-bit load and then shuffling
the data around to set the two 32-bit registers.
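To illustrate the endian subtlety involved in that shuffle, here is a
standalone C sketch (not QEMU code; split_pair32 is a hypothetical helper):
when one 64-bit load supplies both halves of a 32-bit register pair, which
half belongs to the first register depends on guest endianness, and swapping
the two extractions silently breaks instructions like ldaxp.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only, not QEMU code: split one 64-bit load into the two
 * 32-bit halves that ldxp/ldaxp's rt and rt2 should receive. On a
 * little-endian guest the first word in memory is the LOW half of the
 * combined 64-bit value; on a big-endian guest it is the HIGH half.
 * Swapping the two extractions is exactly the sort of silent breakage
 * described above. */
static void split_pair32(uint64_t loaded, int big_endian,
                         uint32_t *rt, uint32_t *rt2)
{
    if (big_endian) {
        *rt  = (uint32_t)(loaded >> 32);
        *rt2 = (uint32_t)loaded;
    } else {
        *rt  = (uint32_t)loaded;
        *rt2 = (uint32_t)(loaded >> 32);
    }
}

int main(void)
{
    uint32_t rt, rt2;

    split_pair32(0x1122334455667788ULL, 0, &rt, &rt2);
    printf("LE: rt=%08" PRIx32 " rt2=%08" PRIx32 "\n", rt, rt2);
    split_pair32(0x1122334455667788ULL, 1, &rt, &rt2);
    printf("BE: rt=%08" PRIx32 " rt2=%08" PRIx32 "\n", rt, rt2);
    return 0;
}
```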
As I couldn't quite follow the endian magic, I've simply partially
reverted the change, restoring the original gen_load_exclusive code.
This doesn't affect the cmpxchg functionality, as that is all done in
the gen_store_exclusive path, which is untouched.
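As background on that untouched path, a cmpxchg-based store-exclusive
succeeds only if memory still holds the value remembered by the
load-exclusive. A minimal conceptual sketch, assuming GCC/Clang atomic
builtins (store_exclusive64 is a hypothetical name, not the QEMU API):

```c
#include <stdbool.h>
#include <stdint.h>

/* Conceptual sketch of a cmpxchg-based store-exclusive, in the spirit of
 * what the message attributes to gen_store_exclusive; this is NOT the
 * QEMU implementation. `expected` plays the role of cpu_exclusive_val,
 * the value remembered by the load-exclusive. Returns true on success
 * (the equivalent of STXR status 0). */
static bool store_exclusive64(uint64_t *addr, uint64_t expected,
                              uint64_t newval)
{
    /* Atomically store newval only if *addr still holds the value the
     * load-exclusive saw; any intervening write makes this fail. */
    return __atomic_compare_exchange_n(addr, &expected, newval,
                                       false, __ATOMIC_SEQ_CST,
                                       __ATOMIC_SEQ_CST);
}

int main(void)
{
    uint64_t mem = 42;                 /* location marked exclusive */
    uint64_t seen = 42;                /* value the load returned   */

    bool ok = store_exclusive64(&mem, seen, 99);
    return (ok && mem == 99) ? 0 : 1;  /* exit 0: store succeeded */
}
```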
I've also restored the comment that was removed (with a slight tweak
to mention cmpxchg).
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Richard Henderson <rth@twiddle.net>
Message-id: 20161202173454.19179-1-alex.bennee@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-arm/translate-a64.c')
-rw-r--r-- | target-arm/translate-a64.c | 42 |
1 file changed, 19 insertions, 23 deletions
```diff
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index de48747376..6dc27a6115 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -1839,41 +1839,37 @@ static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
     }
 }
 
+/*
+ * Load/Store exclusive instructions are implemented by remembering
+ * the value/address loaded, and seeing if these are the same
+ * when the store is performed. This is not actually the architecturally
+ * mandated semantics, but it works for typical guest code sequences
+ * and avoids having to monitor regular stores.
+ *
+ * The store exclusive uses the atomic cmpxchg primitives to avoid
+ * races in multi-threaded linux-user and when MTTCG softmmu is
+ * enabled.
+ */
 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                                TCGv_i64 addr, int size, bool is_pair)
 {
     TCGv_i64 tmp = tcg_temp_new_i64();
-    TCGMemOp be = s->be_data;
+    TCGMemOp memop = s->be_data + size;
 
     g_assert(size <= 3);
+    tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
+
     if (is_pair) {
+        TCGv_i64 addr2 = tcg_temp_new_i64();
         TCGv_i64 hitmp = tcg_temp_new_i64();
 
-        if (size == 3) {
-            TCGv_i64 addr2 = tcg_temp_new_i64();
-
-            tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s),
-                                MO_64 | MO_ALIGN_16 | be);
-            tcg_gen_addi_i64(addr2, addr, 8);
-            tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s),
-                                MO_64 | MO_ALIGN | be);
-            tcg_temp_free_i64(addr2);
-        } else {
-            g_assert(size == 2);
-            tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s),
-                                MO_64 | MO_ALIGN | be);
-            if (be == MO_LE) {
-                tcg_gen_extr32_i64(tmp, hitmp, tmp);
-            } else {
-                tcg_gen_extr32_i64(hitmp, tmp, tmp);
-            }
-        }
-
+        g_assert(size >= 2);
+        tcg_gen_addi_i64(addr2, addr, 1 << size);
+        tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
+        tcg_temp_free_i64(addr2);
         tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
         tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
         tcg_temp_free_i64(hitmp);
-    } else {
-        tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), size | MO_ALIGN | be);
     }
 
     tcg_gen_mov_i64(cpu_exclusive_val, tmp);
```
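The restored scheme amounts to two independent loads of 1 << size bytes
each, the second at addr + (1 << size), instead of one double-width load
plus a shuffle. A rough standalone sketch of that addressing
(load_exclusive_pair is a hypothetical helper; the memcpy is naive
host-endian, whereas QEMU handles guest endianness through the memop
argument to tcg_gen_qemu_ld_i64):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper, not QEMU code: the restored scheme is just two
 * independent loads of (1 << size) bytes, the second one at
 * addr + (1 << size). */
static void load_exclusive_pair(const uint8_t *addr, int size,
                                uint64_t *val, uint64_t *high)
{
    size_t bytes = (size_t)1 << size;  /* size==2 -> 4 bytes, size==3 -> 8 */
    uint64_t lo = 0, hi = 0;

    memcpy(&lo, addr, bytes);          /* first element  -> rt  */
    memcpy(&hi, addr + bytes, bytes);  /* second element -> rt2 */
    *val = lo;   /* what gen_load_exclusive keeps in cpu_exclusive_val  */
    *high = hi;  /* what it keeps in cpu_exclusive_high                 */
}

int main(void)
{
    uint8_t mem[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    uint64_t v, h;

    load_exclusive_pair(mem, 2, &v, &h);   /* two 32-bit elements */
    printf("val=%llx high=%llx\n",
           (unsigned long long)v, (unsigned long long)h);
    return 0;
}
```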