author     Blue Swirl <blauwirbel@gmail.com>   2011-09-18 14:55:46 +0000
committer  Blue Swirl <blauwirbel@gmail.com>   2012-03-18 12:21:52 +0000
commit     e141ab52d2ea5d0bc6ad3b1ad32841127ca04adc (patch)
tree       9290ef9ce78430649ba092b1c755577c48bcf81c /tcg/arm
parent     6a18ae2d2947532d5c26439548afa0481c4529f9 (diff)
softmmu templates: optionally pass CPUState to memory access functions
Optionally, make the memory access helpers take a CPUState parameter instead of relying on the global env. On most targets, perform simple moves to reorder registers. On i386, switch from the regparm(3) calling convention to the standard stack-based version.

Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
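For reference, a minimal sketch of the two helper signatures the ARM backend now chooses between, based on the comments added in the hunk below. The stand-in typedefs and the uint32_t value/return type for the 32-bit ("l") case are assumptions for illustration, not copied from softmmu_defs.h:

/* Sketch only: stand-in typedefs so the prototypes are self-contained;
   the real definitions come from QEMU's headers. */
typedef struct CPUState CPUState;
typedef unsigned int target_ulong;   /* assumption: 32-bit guest addresses */

/* With CONFIG_TCG_PASS_AREG0: CPU state is an explicit first argument. */
uint32_t helper_ldl_mmu(CPUState *env, target_ulong addr, int mmu_idx);
void helper_stl_mmu(CPUState *env, target_ulong addr, uint32_t val, int mmu_idx);

/* Legacy variants rely on the global env and omit it. */
uint32_t __ldl_mmu(target_ulong addr, int mmu_idx);
void __stl_mmu(target_ulong addr, uint32_t val, int mmu_idx);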
Diffstat (limited to 'tcg/arm')
-rw-r--r--  tcg/arm/tcg-target.c  53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 5af21b3f5d..4d59a63855 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -929,6 +929,27 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
#include "../../softmmu_defs.h"
+#ifdef CONFIG_TCG_PASS_AREG0
+/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
+ int mmu_idx) */
+static const void * const qemu_ld_helpers[4] = {
+ helper_ldb_mmu,
+ helper_ldw_mmu,
+ helper_ldl_mmu,
+ helper_ldq_mmu,
+};
+
+/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
+ uintxx_t val, int mmu_idx) */
+static const void * const qemu_st_helpers[4] = {
+ helper_stb_mmu,
+ helper_stw_mmu,
+ helper_stl_mmu,
+ helper_stq_mmu,
+};
+#else
+/* legacy helper signature: __ld_mmu(target_ulong addr, int
+ mmu_idx) */
static void *qemu_ld_helpers[4] = {
__ldb_mmu,
__ldw_mmu,
@@ -936,6 +957,8 @@ static void *qemu_ld_helpers[4] = {
__ldq_mmu,
};
+/* legacy helper signature: __st_mmu(target_ulong addr, uintxx_t val,
+ int mmu_idx) */
static void *qemu_st_helpers[4] = {
__stb_mmu,
__stw_mmu,
@@ -943,6 +966,7 @@ static void *qemu_st_helpers[4] = {
__stq_mmu,
};
#endif
+#endif
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
@@ -1075,6 +1099,19 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal and incorrect for 64 bit */
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[2], 0,
+ tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[1], 0,
+ tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
+
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
+ SHIFT_IMM_LSL(0));
+#endif
tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
switch (opc) {
@@ -1341,6 +1378,22 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
}
# endif
+#ifdef CONFIG_TCG_PASS_AREG0
+ /* XXX/FIXME: suboptimal and incorrect for 64 bit */
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[3], 0,
+ tcg_target_call_iarg_regs[2], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[2], 0,
+ tcg_target_call_iarg_regs[1], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[1], 0,
+ tcg_target_call_iarg_regs[0], SHIFT_IMM_LSL(0));
+
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ tcg_target_call_iarg_regs[0], 0, TCG_AREG0,
+ SHIFT_IMM_LSL(0));
+#endif
tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
if (opc == 3)
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
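The CONFIG_TCG_PASS_AREG0 blocks added above follow one simple pattern: each already-loaded helper argument is moved one call register up (e.g. r2 -> r3, r1 -> r2, r0 -> r1 in the store path) and TCG_AREG0 is copied into the first argument register, so the helper receives env prepended to its original argument list. As the XXX/FIXME comments note, this sequence is suboptimal and not correct for 64-bit values. A minimal host-independent sketch of the rotation, with array slots standing in for tcg_target_call_iarg_regs (illustrative only, not QEMU code):

#include <stddef.h>

/* Shift the n argument values one call-register slot up and put env
   into slot 0, mirroring the mov sequence emitted above. */
void prepend_env_arg(unsigned long *iarg, size_t n, unsigned long env)
{
    for (size_t i = n; i > 0; i--) {
        iarg[i] = iarg[i - 1];  /* e.g. r2 -> r3, r1 -> r2, r0 -> r1 */
    }
    iarg[0] = env;              /* TCG_AREG0 becomes the first argument */
}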