-rw-r--r--  include/exec/exec-all.h  | 18 ------------------
-rw-r--r--  tcg/aarch64/tcg-target.c | 28 ++++++++++++++++------------
2 files changed, 16 insertions, 30 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 8dd15948d8..3ce80d1587 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -320,24 +320,6 @@ extern uintptr_t tci_tb_ptr;
#define GETPC() (GETRA() - GETPC_ADJ)
-/* The LDST optimizations splits code generation into fast and slow path.
-   In some implementations, we pass the "logical" return address manually;
-   in others, we must infer the logical return from the true return. */
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-# if defined(__aarch64__)
-# define GETRA_LDST(RA) tcg_getra_ldst(RA)
-static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
-{
-    int32_t b;
-    ra += 4;                    /* skip one instruction */
-    b = *(int32_t *)ra;         /* load the branch insn */
-    b = (b << 6) >> (6 - 2);    /* extract the displacement */
-    ra += b;                    /* apply the displacement */
-    return ra;
-}
-# endif
-#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
-
/* ??? Delete these once they are no longer used. */
bool is_tcg_gen_code(uintptr_t pc_ptr);
#ifdef GETRA_LDST
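
[Note, not part of the patch: the block deleted above is what this change makes
unnecessary. With the old interface a helper only received the true return
address, so on aarch64 it had to recover the "logical" one by decoding the
unconditional branch that the generated slow path places just after the call;
that branch targets the address (lb->raddr) which the patch below now passes
explicitly in a register (X3 for loads, X4 for stores). A standalone sketch of
that decode step, with the shift arithmetic spelled out; the helper name here
carries a _sketch suffix to mark it as illustration:]

    #include <stdint.h>

    /* Mirror of the deleted tcg_getra_ldst(), annotated.  It assumes an
     * AArch64 B instruction: opcode in bits 31:26, signed 26-bit word
     * offset in bits 25:0. */
    static inline uintptr_t getra_ldst_sketch(uintptr_t ra)
    {
        int32_t b;

        ra += 4;                  /* skip the insn right after the call */
        b = *(int32_t *)ra;       /* fetch the branch insn */
        b = (b << 6) >> (6 - 2);  /* <<6 drops opcode bits 31:26; arithmetic
                                   * >>4 sign-extends imm26, scaled by 4 */
        ra += b;                  /* branch target = logical return address */
        return ra;
    }
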
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 6379df1f68..78e1443808 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -778,22 +778,24 @@ static inline void tcg_out_nop(TCGContext *s)
}
#ifdef CONFIG_SOFTMMU
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
-   int mmu_idx) */
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ *                                     int mmu_idx, uintptr_t ra)
+ */
static const void * const qemu_ld_helpers[4] = {
-    helper_ldb_mmu,
-    helper_ldw_mmu,
-    helper_ldl_mmu,
-    helper_ldq_mmu,
+    helper_ret_ldub_mmu,
+    helper_ret_lduw_mmu,
+    helper_ret_ldul_mmu,
+    helper_ret_ldq_mmu,
};
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
-   uintxx_t val, int mmu_idx) */
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
+ */
static const void * const qemu_st_helpers[4] = {
-    helper_stb_mmu,
-    helper_stw_mmu,
-    helper_stl_mmu,
-    helper_stq_mmu,
+    helper_ret_stb_mmu,
+    helper_ret_stw_mmu,
+    helper_ret_stl_mmu,
+    helper_ret_stq_mmu,
};
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
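
[Note, not part of the patch: the arrays above change which helpers are called,
but the interface change itself is a single extra trailing argument, the return
address, which is why each slow path below grows exactly one tcg_out_movi.  A
self-contained toy model of that shape; every toy_* name below is invented for
illustration and is not a QEMU API:]

    #include <stdint.h>
    #include <stdio.h>

    /* A "ret"-style load helper: same (env, addr, mmu_idx) arguments as
     * before, plus an explicit return address the helper can report on a
     * fault instead of deriving it by decoding code near the call site. */
    typedef uint64_t (*toy_ld_fn)(void *env, uint64_t addr,
                                  int mmu_idx, uintptr_t retaddr);

    static uint64_t toy_ld(void *env, uint64_t addr, int mmu_idx, uintptr_t ra)
    {
        (void)env; (void)mmu_idx;
        printf("slow-path load @%#llx, retaddr %#llx\n",
               (unsigned long long)addr, (unsigned long long)ra);
        return 0;
    }

    /* Four sizes, indexed like qemu_ld_helpers[lb->opc & 3] above. */
    static const toy_ld_fn toy_ld_helpers[4] = { toy_ld, toy_ld, toy_ld, toy_ld };

    int main(void)
    {
        int opc = 2;  /* pretend a 32-bit load is being translated */
        toy_ld_helpers[opc & 3](NULL, 0x1000, 0, (uintptr_t)0x4000beef);
        return 0;
    }
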
@@ -802,6 +804,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0);
     tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X3, (tcg_target_long)lb->raddr);
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
                  (tcg_target_long)qemu_ld_helpers[lb->opc & 3]);
     tcg_out_callr(s, TCG_REG_TMP);
@@ -822,6 +825,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
     tcg_out_movr(s, 1, TCG_REG_X2, lb->datalo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X4, (tcg_target_long)lb->raddr);
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
                  (tcg_target_long)qemu_st_helpers[lb->opc & 3]);
     tcg_out_callr(s, TCG_REG_TMP);
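
[Note, not part of the patch: the mov sequences above are plain AAPCS64
argument marshalling -- the first integer/pointer arguments go in X0..X7 --
so the register choices line up with the helper signatures as follows:]

    /* Argument registers used by the slow paths above:
     *
     *   load:  helper(env = X0, addr = X1, mmu_idx = X2, retaddr = X3)
     *   store: helper(env = X0, addr = X1, val = X2, mmu_idx = X3, retaddr = X4)
     *
     * retaddr is lb->raddr, the argument added by this patch.  TCG_REG_TMP
     * only carries the helper's address for the indirect call made by
     * tcg_out_callr(); it is not part of the argument list. */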