author     Anthony Liguori <aliguori@amazon.com>   2013-10-14 09:59:59 -0700
committer  Anthony Liguori <aliguori@amazon.com>   2013-10-14 09:59:59 -0700
commit     1680d485777ecf436d724631ea8722cc0c66990e
tree       e34c35b33715e96c9b209caf2f9f721ff1356274
parent     ded77da3cd6b6bcd201a4e36abb3294d725be644
parent     f8da40aefbd1b40a0c5ab6fb25987943fe01125c
Merge remote-tracking branch 'rth/tcg-ldst-6' into staging
# By Richard Henderson
# Via Richard Henderson
* rth/tcg-ldst-6:
target-alpha: Convert to new ldst opcodes
tcg-ppc64: Support new ldst opcodes
tcg-ppc: Support new ldst opcodes
tcg-ppc64: Convert to le/be ldst helpers
tcg-ppc: Convert to le/be ldst helpers
tcg-ppc64: Use TCGMemOp within qemu_ldst routines
tcg-ppc: Use TCGMemOp within qemu_ldst routines
tcg-arm: Improve GUEST_BASE qemu_ld/st
tcg-arm: Convert to new ldst opcodes
tcg-arm: Tidy variable naming convention in qemu_ld/st
tcg-arm: Convert to le/be ldst helpers
tcg-arm: Use TCGMemOp within qemu_ldst routines
tcg-i386: Support new ldst opcodes
tcg-i386: Remove "cb" output restriction from qemu_st8 for i386
tcg-i386: Tidy softmmu routines
tcg-i386: Use TCGMemOp within qemu_ldst routines
tcg: Use TCGMemOp for TCGLabelQemuLdst.opc
Message-id: 1381620683-4568-1-git-send-email-rth@twiddle.net
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
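
Every patch in this series keys the backend helper tables and switches off a
TCGMemOp value instead of the old 0..3-plus-flag integers. As a reader's aid,
the sketch below condenses the MO_* layout these diffs rely on; it is
paraphrased from tcg/tcg.h of this era rather than quoted verbatim, and the
masks it defines (MO_SIZE, MO_SSIZE, MO_SIGN, MO_BSWAP) are exactly the ones
used in the hunks that follow:

    /* Condensed sketch of the TCGMemOp encoding assumed by this series.
       Size, signedness and endianness pack into four bits, which is why
       the new qemu_ld/st helper tables below have 16 entries.  */
    typedef enum TCGMemOp {
        MO_8     = 0,
        MO_16    = 1,
        MO_32    = 2,
        MO_64    = 3,
        MO_SIZE  = 3,                  /* mask: log2 of the access size */

        MO_SIGN  = 4,                  /* sign-extend the loaded value */
        MO_SSIZE = MO_SIZE | MO_SIGN,  /* mask: size plus signedness */

        MO_BSWAP = 8,                  /* swap bytes relative to the host */
    #ifdef HOST_WORDS_BIGENDIAN
        MO_LE    = MO_BSWAP,
        MO_BE    = 0,
    #else
        MO_LE    = 0,
        MO_BE    = MO_BSWAP,
    #endif

        MO_UB    = MO_8,
        MO_SB    = MO_SIGN | MO_8,
        MO_LEUW  = MO_LE | MO_16,
        MO_LEUL  = MO_LE | MO_32,
        MO_LESW  = MO_LE | MO_SIGN | MO_16,
        MO_LESL  = MO_LE | MO_SIGN | MO_32,
        MO_LEQ   = MO_LE | MO_64,
        /* MO_BEUW, MO_BEUL, MO_BESW, MO_BESL, MO_BEQ likewise with MO_BE. */
    } TCGMemOp;

With this layout, "opc & ~MO_SIGN" (used by the armv6 fast path below) maps a
signed load onto its unsigned helper, and "opc & MO_BSWAP" tells a backend
whether guest and host disagree on byte order.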
-rw-r--r--   target-alpha/translate.c |  49
-rw-r--r--   tcg/arm/tcg-target.c     | 556
-rw-r--r--   tcg/arm/tcg-target.h     |   2
-rw-r--r--   tcg/i386/tcg-target.c    | 643
-rw-r--r--   tcg/i386/tcg-target.h    |   2
-rw-r--r--   tcg/ppc/tcg-target.c     | 202
-rw-r--r--   tcg/ppc/tcg-target.h     |   2
-rw-r--r--   tcg/ppc64/tcg-target.c   | 163
-rw-r--r--   tcg/ppc64/tcg-target.h   |   2
-rw-r--r--   tcg/tcg-be-ldst.h        |   2
10 files changed, 722 insertions(+), 901 deletions(-)
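
The target-alpha hunks that open the diff show the shape of a front-end
conversion: the old opcodes named their access size and signedness and left
guest endianness implicit, while the new tcg_gen_qemu_ld_i64/st_i64 take an
explicit TCGMemOp. A minimal before/after pair, matching what the diff does
for alpha's little-endian loads and stores:

    /* Before: size and signedness encoded in the opcode name,
       guest endianness implicit in the target.  */
    tcg_gen_qemu_ld32s(t0, addr, mem_idx);
    tcg_gen_qemu_st64(val, addr, mem_idx);

    /* After: one opcode per value type; the TCGMemOp carries size,
       signedness and endianness.  */
    tcg_gen_qemu_ld_i64(t0, addr, mem_idx, MO_LESL);
    tcg_gen_qemu_st_i64(val, addr, mem_idx, MO_LEQ);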
diff --git a/target-alpha/translate.c b/target-alpha/translate.c index 9cb8084057..c24910f6a1 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -168,44 +168,38 @@ static inline ExitStatus gen_invalid(DisasContext *ctx) static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags) { - TCGv tmp = tcg_temp_new(); TCGv_i32 tmp32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld32u(tmp, t1, flags); - tcg_gen_trunc_i64_i32(tmp32, tmp); + tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); gen_helper_memory_to_f(t0, tmp32); tcg_temp_free_i32(tmp32); - tcg_temp_free(tmp); } static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags) { TCGv tmp = tcg_temp_new(); - tcg_gen_qemu_ld64(tmp, t1, flags); + tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ); gen_helper_memory_to_g(t0, tmp); tcg_temp_free(tmp); } static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags) { - TCGv tmp = tcg_temp_new(); TCGv_i32 tmp32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld32u(tmp, t1, flags); - tcg_gen_trunc_i64_i32(tmp32, tmp); + tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); gen_helper_memory_to_s(t0, tmp32); tcg_temp_free_i32(tmp32); - tcg_temp_free(tmp); } static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags) { - tcg_gen_qemu_ld32s(t0, t1, flags); + tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL); tcg_gen_mov_i64(cpu_lock_addr, t1); tcg_gen_mov_i64(cpu_lock_value, t0); } static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags) { - tcg_gen_qemu_ld64(t0, t1, flags); + tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ); tcg_gen_mov_i64(cpu_lock_addr, t1); tcg_gen_mov_i64(cpu_lock_value, t0); } @@ -247,11 +241,8 @@ static inline void gen_load_mem(DisasContext *ctx, static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - TCGv tmp = tcg_temp_new(); gen_helper_f_to_memory(tmp32, t0); - tcg_gen_extu_i32_i64(tmp, tmp32); - tcg_gen_qemu_st32(tmp, t1, flags); - tcg_temp_free(tmp); + tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); tcg_temp_free_i32(tmp32); } @@ -259,18 +250,15 @@ static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags) { TCGv tmp = tcg_temp_new(); gen_helper_g_to_memory(tmp, t0); - tcg_gen_qemu_st64(tmp, t1, flags); + tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ); tcg_temp_free(tmp); } static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - TCGv tmp = tcg_temp_new(); gen_helper_s_to_memory(tmp32, t0); - tcg_gen_extu_i32_i64(tmp, tmp32); - tcg_gen_qemu_st32(tmp, t1, flags); - tcg_temp_free(tmp); + tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); tcg_temp_free_i32(tmp32); } @@ -348,18 +336,11 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); val = tcg_temp_new(); - if (quad) { - tcg_gen_qemu_ld64(val, addr, ctx->mem_idx); - } else { - tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx); - } + tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL); tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail); - if (quad) { - tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx); - } else { - tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx); - } + tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx, + quad ? 
MO_LEQ : MO_LEUL); tcg_gen_movi_i64(cpu_ir[ra], 1); tcg_gen_br(lab_done); @@ -2966,11 +2947,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */ - tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX); + tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL); break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ - tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX); + tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ @@ -2981,12 +2962,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0xE: /* Longword virtual access with alternate access mode and protection checks (hw_ldl/wa) */ - tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX); + tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL); break; case 0xF: /* Quadword virtual access with alternate access mode and protection checks (hw_ldq/wa) */ - tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX); + tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ); break; } tcg_temp_free(addr); diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c index c0e14661b2..e93a4a237b 100644 --- a/tcg/arm/tcg-target.c +++ b/tcg/arm/tcg-target.c @@ -186,7 +186,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) #endif break; - /* qemu_st address & data_reg */ + /* qemu_st address & data */ case 's': ct->ct |= TCG_CT_REG; tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); @@ -1079,26 +1079,34 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index) /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static const void * const qemu_ld_helpers[8] = { - helper_ret_ldub_mmu, - helper_ret_lduw_mmu, - helper_ret_ldul_mmu, - helper_ret_ldq_mmu, - - helper_ret_ldsb_mmu, - helper_ret_ldsw_mmu, - helper_ret_ldul_mmu, - helper_ret_ldq_mmu, +static const void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_SB] = helper_ret_ldsb_mmu, + + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_LESW] = helper_le_ldsw_mmu, + [MO_LESL] = helper_le_ldul_mmu, + + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, + [MO_BESW] = helper_be_ldsw_mmu, + [MO_BESL] = helper_be_ldul_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static const void * const qemu_st_helpers[4] = { - helper_ret_stb_mmu, - helper_ret_stw_mmu, - helper_ret_stl_mmu, - helper_ret_stq_mmu, +static const void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, }; /* Helper routines for marshalling helper function arguments into @@ -1169,7 +1177,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. 
*/ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, - int s_bits, int mem_index, bool is_load) + TCGMemOp s_bits, int mem_index, bool is_load) { TCGReg base = TCG_AREG0; int cmp_off = @@ -1179,13 +1187,13 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); /* Should generate something like the following: - * shr tmp, addr_reg, #TARGET_PAGE_BITS (1) + * shr tmp, addrlo, #TARGET_PAGE_BITS (1) * add r2, env, #high * and r0, tmp, #(CPU_TLB_SIZE - 1) (2) * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS (3) * ldr r0, [r2, #cmp] (4) - * tst addr_reg, #s_mask - * ldr r1, [r2, #add] (5) + * tst addrlo, #s_mask + * ldr r2, [r2, #add] (5) * cmpeq r0, tmp, lsl #TARGET_PAGE_BITS */ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, @@ -1240,19 +1248,19 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. */ -static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc, - int data_reg, int data_reg2, int addrlo_reg, - int addrhi_reg, int mem_index, +static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, TCGReg addrlo, + TCGReg addrhi, int mem_index, uint8_t *raddr, uint8_t *label_ptr) { TCGLabelQemuLdst *label = new_ldst_label(s); label->is_ld = is_ld; label->opc = opc; - label->datalo_reg = data_reg; - label->datahi_reg = data_reg2; - label->addrlo_reg = addrlo_reg; - label->addrhi_reg = addrhi_reg; + label->datalo_reg = datalo; + label->datahi_reg = datahi; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; label->mem_index = mem_index; label->raddr = raddr; label->label_ptr[0] = label_ptr; @@ -1260,8 +1268,8 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc, static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGReg argreg, data_reg, data_reg2; - int opc = lb->opc; + TCGReg argreg, datalo, datahi; + TCGMemOp opc = lb->opc; uintptr_t func; reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr); @@ -1279,38 +1287,38 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) icache usage. For pre-armv6, use the signed helpers since we do not have a single insn sign-extend. 
*/ if (use_armv6_instructions) { - func = (uintptr_t)qemu_ld_helpers[opc & 3]; + func = (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN]; } else { func = (uintptr_t)qemu_ld_helpers[opc]; - if (opc & 4) { - opc = 2; + if (opc & MO_SIGN) { + opc = MO_UL; } } tcg_out_call(s, func); - data_reg = lb->datalo_reg; - data_reg2 = lb->datahi_reg; - switch (opc) { - case 0 | 4: - tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0); + datalo = lb->datalo_reg; + datahi = lb->datahi_reg; + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0); break; - case 1 | 4: - tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0); + case MO_SW: + tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0); break; default: - tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0); - break; - case 3: - if (data_reg != TCG_REG_R1) { - tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0); - tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1); - } else if (data_reg2 != TCG_REG_R0) { - tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1); - tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0); + tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); + break; + case MO_Q: + if (datalo != TCG_REG_R1) { + tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); + tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); + } else if (datahi != TCG_REG_R0) { + tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); + tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0); } else { tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0); - tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1); - tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_TMP); + tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1); + tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP); } break; } @@ -1320,7 +1328,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGReg argreg, data_reg, data_reg2; + TCGReg argreg, datalo, datahi; + TCGMemOp opc = lb->opc; reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr); @@ -1332,20 +1341,21 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg); } - data_reg = lb->datalo_reg; - data_reg2 = lb->datahi_reg; - switch (lb->opc) { - case 0: - argreg = tcg_out_arg_reg8(s, argreg, data_reg); + datalo = lb->datalo_reg; + datahi = lb->datahi_reg; + switch (opc & MO_SIZE) { + case MO_8: + argreg = tcg_out_arg_reg8(s, argreg, datalo); break; - case 1: - argreg = tcg_out_arg_reg16(s, argreg, data_reg); + case MO_16: + argreg = tcg_out_arg_reg16(s, argreg, datalo); break; - case 2: - argreg = tcg_out_arg_reg32(s, argreg, data_reg); + case MO_32: + default: + argreg = tcg_out_arg_reg32(s, argreg, datalo); break; - case 3: - argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2); + case MO_64: + argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi); break; } @@ -1353,282 +1363,289 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); /* Tail-call to the helper, which will return to the fast path. 
*/ - tcg_out_goto(s, COND_AL, (tcg_target_long) qemu_st_helpers[lb->opc & 3]); + tcg_out_goto(s, COND_AL, (uintptr_t)qemu_st_helpers[opc]); } #endif /* SOFTMMU */ -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) +static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addend) { - TCGReg addr_reg, data_reg, data_reg2; - bool bswap; -#ifdef CONFIG_SOFTMMU - int mem_index, s_bits; - TCGReg addr_reg2, addend; - uint8_t *label_ptr; -#endif -#ifdef TARGET_WORDS_BIGENDIAN - bswap = 1; -#else - bswap = 0; -#endif + TCGMemOp bswap = opc & MO_BSWAP; - data_reg = *args++; - data_reg2 = (opc == 3 ? *args++ : 0); - addr_reg = *args++; -#ifdef CONFIG_SOFTMMU - addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0); - mem_index = *args; - s_bits = opc & 3; - - addend = tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits, mem_index, 1); - - /* This a conditional BL only to load a pointer within this opcode into LR - for the slow path. We will not be using the value for a tail call. */ - label_ptr = s->code_ptr; - tcg_out_bl_noaddr(s, COND_NE); - - switch (opc) { - case 0: - tcg_out_ld8_r(s, COND_AL, data_reg, addr_reg, addend); + switch (opc & MO_SSIZE) { + case MO_UB: + tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend); break; - case 0 | 4: - tcg_out_ld8s_r(s, COND_AL, data_reg, addr_reg, addend); + case MO_SB: + tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend); break; - case 1: - tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, addend); + case MO_UW: + tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); if (bswap) { - tcg_out_bswap16(s, COND_AL, data_reg, data_reg); + tcg_out_bswap16(s, COND_AL, datalo, datalo); } break; - case 1 | 4: + case MO_SW: if (bswap) { - tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, addend); - tcg_out_bswap16s(s, COND_AL, data_reg, data_reg); + tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend); + tcg_out_bswap16s(s, COND_AL, datalo, datalo); } else { - tcg_out_ld16s_r(s, COND_AL, data_reg, addr_reg, addend); + tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend); } break; - case 2: + case MO_UL: default: - tcg_out_ld32_r(s, COND_AL, data_reg, addr_reg, addend); + tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend); if (bswap) { - tcg_out_bswap32(s, COND_AL, data_reg, data_reg); + tcg_out_bswap32(s, COND_AL, datalo, datalo); } break; - case 3: + case MO_Q: { - /* Be careful not to modify data_reg and data_reg2 - for the slow path below. */ - TCGReg dl = (bswap ? data_reg2 : data_reg); - TCGReg dh = (bswap ? data_reg : data_reg2); + TCGReg dl = (bswap ? datahi : datalo); + TCGReg dh = (bswap ? 
datalo : datahi); if (use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) { - tcg_out_ldrd_r(s, COND_AL, dl, addr_reg, addend); + tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend); } else if (dl != addend) { - tcg_out_ld32_rwb(s, COND_AL, dl, addend, addr_reg); + tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo); tcg_out_ld32_12(s, COND_AL, dh, addend, 4); } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP, - addend, addr_reg, SHIFT_IMM_LSL(0)); + addend, addrlo, SHIFT_IMM_LSL(0)); tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0); tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4); } if (bswap) { - tcg_out_bswap32(s, COND_AL, dh, dh); tcg_out_bswap32(s, COND_AL, dl, dl); + tcg_out_bswap32(s, COND_AL, dh, dh); } } break; } +} - add_qemu_ldst_label(s, 1, opc, data_reg, data_reg2, addr_reg, addr_reg2, - mem_index, s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - if (GUEST_BASE) { - uint32_t offset = GUEST_BASE; - int i, rot; - - while (offset) { - i = ctz32(offset) & ~1; - rot = ((32 - i) << 7) & 0xf00; +static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo) +{ + TCGMemOp bswap = opc & MO_BSWAP; - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg, - ((offset >> i) & 0xff) | rot); - addr_reg = TCG_REG_TMP; - offset &= ~(0xff << i); - } - } - switch (opc) { - case 0: - tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0); + switch (opc & MO_SSIZE) { + case MO_UB: + tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0); break; - case 0 | 4: - tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0); + case MO_SB: + tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0); break; - case 1: - tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0); + case MO_UW: + tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); if (bswap) { - tcg_out_bswap16(s, COND_AL, data_reg, data_reg); + tcg_out_bswap16(s, COND_AL, datalo, datalo); } break; - case 1 | 4: + case MO_SW: if (bswap) { - tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0); - tcg_out_bswap16s(s, COND_AL, data_reg, data_reg); + tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0); + tcg_out_bswap16s(s, COND_AL, datalo, datalo); } else { - tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0); + tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0); } break; - case 2: + case MO_UL: default: - tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0); + tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0); if (bswap) { - tcg_out_bswap32(s, COND_AL, data_reg, data_reg); + tcg_out_bswap32(s, COND_AL, datalo, datalo); } break; - case 3: - if (use_armv6_instructions && !bswap - && (data_reg & 1) == 0 && data_reg2 == data_reg + 1) { - tcg_out_ldrd_8(s, COND_AL, data_reg, addr_reg, 0); - } else if (use_armv6_instructions && bswap - && (data_reg2 & 1) == 0 && data_reg == data_reg2 + 1) { - tcg_out_ldrd_8(s, COND_AL, data_reg2, addr_reg, 0); - } else if (data_reg == addr_reg) { - tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4); - tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0); - } else { - tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0); - tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4); - } - if (bswap) { - tcg_out_bswap32(s, COND_AL, data_reg, data_reg); - tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2); + case MO_Q: + { + TCGReg dl = (bswap ? datahi : datalo); + TCGReg dh = (bswap ? 
datalo : datahi); + + if (use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) { + tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0); + } else if (dl == addrlo) { + tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4); + tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0); + } else { + tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0); + tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4); + } + if (bswap) { + tcg_out_bswap32(s, COND_AL, dl, dl); + tcg_out_bswap32(s, COND_AL, dh, dh); + } } break; } -#endif } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc) +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { - TCGReg addr_reg, data_reg, data_reg2; - bool bswap; + TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); + TCGMemOp opc; #ifdef CONFIG_SOFTMMU - int mem_index, s_bits; - TCGReg addr_reg2, addend; + int mem_index; + TCGReg addend; uint8_t *label_ptr; #endif -#ifdef TARGET_WORDS_BIGENDIAN - bswap = 1; -#else - bswap = 0; -#endif - data_reg = *args++; - data_reg2 = (opc == 3 ? *args++ : 0); - addr_reg = *args++; + datalo = *args++; + datahi = (is64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); + opc = *args++; + #ifdef CONFIG_SOFTMMU - addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0); mem_index = *args; - s_bits = opc & 3; + addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 1); - addend = tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits, mem_index, 0); + /* This a conditional BL only to load a pointer within this opcode into LR + for the slow path. We will not be using the value for a tail call. */ + label_ptr = s->code_ptr; + tcg_out_bl_noaddr(s, COND_NE); - switch (opc) { - case 0: - tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, addend); + tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend); + + add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi, + mem_index, s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + if (GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE); + tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP); + } else { + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); + } +#endif +} + +static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addend) +{ + TCGMemOp bswap = opc & MO_BSWAP; + + switch (opc & MO_SIZE) { + case MO_8: + tcg_out_st8_r(s, cond, datalo, addrlo, addend); break; - case 1: + case MO_16: if (bswap) { - tcg_out_bswap16st(s, COND_EQ, TCG_REG_R0, data_reg); - tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, addend); + tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo); + tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend); } else { - tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, addend); + tcg_out_st16_r(s, cond, datalo, addrlo, addend); } break; - case 2: + case MO_32: default: if (bswap) { - tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg); - tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, addend); + tcg_out_bswap32(s, cond, TCG_REG_R0, datalo); + tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend); } else { - tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, addend); + tcg_out_st32_r(s, cond, datalo, addrlo, addend); } break; - case 3: + case MO_64: if (bswap) { - tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2); - tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, addend, addr_reg); - tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg); - tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, addend, 4); + 
tcg_out_bswap32(s, cond, TCG_REG_R0, datahi); + tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo); + tcg_out_bswap32(s, cond, TCG_REG_R0, datalo); + tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4); } else if (use_armv6_instructions - && (data_reg & 1) == 0 && data_reg2 == data_reg + 1) { - tcg_out_strd_r(s, COND_EQ, data_reg, addr_reg, addend); + && (datalo & 1) == 0 && datahi == datalo + 1) { + tcg_out_strd_r(s, cond, datalo, addrlo, addend); } else { - tcg_out_st32_rwb(s, COND_EQ, data_reg, addend, addr_reg); - tcg_out_st32_12(s, COND_EQ, data_reg2, addend, 4); + tcg_out_st32_rwb(s, cond, datalo, addend, addrlo); + tcg_out_st32_12(s, cond, datahi, addend, 4); } break; } +} - /* The conditional call must come last, as we're going to return here. */ - label_ptr = s->code_ptr; - tcg_out_bl_noaddr(s, COND_NE); +static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo) +{ + TCGMemOp bswap = opc & MO_BSWAP; - add_qemu_ldst_label(s, 0, opc, data_reg, data_reg2, addr_reg, addr_reg2, - mem_index, s->code_ptr, label_ptr); -#else /* !CONFIG_SOFTMMU */ - if (GUEST_BASE) { - uint32_t offset = GUEST_BASE; - int i; - int rot; - - while (offset) { - i = ctz32(offset) & ~1; - rot = ((32 - i) << 7) & 0xf00; - - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg, - ((offset >> i) & 0xff) | rot); - addr_reg = TCG_REG_R1; - offset &= ~(0xff << i); - } - } - switch (opc) { - case 0: - tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0); + switch (opc & MO_SIZE) { + case MO_8: + tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0); break; - case 1: + case MO_16: if (bswap) { - tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg); - tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0); + tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo); + tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0); } else { - tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0); + tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0); } break; - case 2: + case MO_32: default: if (bswap) { - tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg); - tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0); + tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo); + tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0); } else { - tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0); + tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); } break; - case 3: + case MO_64: if (bswap) { - tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2); - tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0); - tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg); - tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4); + tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi); + tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0); + tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo); + tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4); } else if (use_armv6_instructions - && (data_reg & 1) == 0 && data_reg2 == data_reg + 1) { - tcg_out_strd_8(s, COND_AL, data_reg, addr_reg, 0); + && (datalo & 1) == 0 && datahi == datalo + 1) { + tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0); } else { - tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0); - tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4); + tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0); + tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4); } break; } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) +{ + TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); + TCGMemOp opc; +#ifdef CONFIG_SOFTMMU + int mem_index; + TCGReg addend; + uint8_t *label_ptr; +#endif 
+ + datalo = *args++; + datahi = (is64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); + opc = *args++; + +#ifdef CONFIG_SOFTMMU + mem_index = *args; + addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 0); + + tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend); + + /* The conditional call must come last, as we're going to return here. */ + label_ptr = s->code_ptr; + tcg_out_bl_noaddr(s, COND_NE); + + add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi, + mem_index, s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + if (GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE); + tcg_out_qemu_st_index(s, COND_AL, opc, datalo, + datahi, addrlo, TCG_REG_TMP); + } else { + tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo); + } #endif } @@ -1897,37 +1914,18 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, ARITH_MOV, args[0], 0, 0); break; - case INDEX_op_qemu_ld8u: + case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, 0); break; - case INDEX_op_qemu_ld8s: - tcg_out_qemu_ld(s, args, 0 | 4); - break; - case INDEX_op_qemu_ld16u: + case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, 1); break; - case INDEX_op_qemu_ld16s: - tcg_out_qemu_ld(s, args, 1 | 4); - break; - case INDEX_op_qemu_ld32: - tcg_out_qemu_ld(s, args, 2); - break; - case INDEX_op_qemu_ld64: - tcg_out_qemu_ld(s, args, 3); - break; - - case INDEX_op_qemu_st8: + case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, 0); break; - case INDEX_op_qemu_st16: + case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, 1); break; - case INDEX_op_qemu_st32: - tcg_out_qemu_st(s, args, 2); - break; - case INDEX_op_qemu_st64: - tcg_out_qemu_st(s, args, 3); - break; case INDEX_op_bswap16_i32: tcg_out_bswap16(s, COND_AL, args[0], args[1]); @@ -2010,29 +2008,15 @@ static const TCGTargetOpDef arm_op_defs[] = { { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } }, #if TARGET_LONG_BITS == 32 - { INDEX_op_qemu_ld8u, { "r", "l" } }, - { INDEX_op_qemu_ld8s, { "r", "l" } }, - { INDEX_op_qemu_ld16u, { "r", "l" } }, - { INDEX_op_qemu_ld16s, { "r", "l" } }, - { INDEX_op_qemu_ld32, { "r", "l" } }, - { INDEX_op_qemu_ld64, { "r", "r", "l" } }, - - { INDEX_op_qemu_st8, { "s", "s" } }, - { INDEX_op_qemu_st16, { "s", "s" } }, - { INDEX_op_qemu_st32, { "s", "s" } }, - { INDEX_op_qemu_st64, { "s", "s", "s" } }, + { INDEX_op_qemu_ld_i32, { "r", "l" } }, + { INDEX_op_qemu_ld_i64, { "r", "r", "l" } }, + { INDEX_op_qemu_st_i32, { "s", "s" } }, + { INDEX_op_qemu_st_i64, { "s", "s", "s" } }, #else - { INDEX_op_qemu_ld8u, { "r", "l", "l" } }, - { INDEX_op_qemu_ld8s, { "r", "l", "l" } }, - { INDEX_op_qemu_ld16u, { "r", "l", "l" } }, - { INDEX_op_qemu_ld16s, { "r", "l", "l" } }, - { INDEX_op_qemu_ld32, { "r", "l", "l" } }, - { INDEX_op_qemu_ld64, { "r", "r", "l", "l" } }, - - { INDEX_op_qemu_st8, { "s", "s", "s" } }, - { INDEX_op_qemu_st16, { "s", "s", "s" } }, - { INDEX_op_qemu_st32, { "s", "s", "s" } }, - { INDEX_op_qemu_st64, { "s", "s", "s", "s" } }, + { INDEX_op_qemu_ld_i32, { "r", "l", "l" } }, + { INDEX_op_qemu_ld_i64, { "r", "r", "l", "l" } }, + { INDEX_op_qemu_st_i32, { "s", "s", "s" } }, + { INDEX_op_qemu_st_i64, { "s", "s", "s", "s" } }, #endif { INDEX_op_bswap16_i32, { "r", "r" } }, diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index 25e1e2876f..3746b6e298 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -85,7 +85,7 @@ extern bool use_idiv_instructions; #define TCG_TARGET_HAS_div_i32 use_idiv_instructions #define 
TCG_TARGET_HAS_rem_i32 0 -#define TCG_TARGET_HAS_new_ldst 0 +#define TCG_TARGET_HAS_new_ldst 1 extern bool tcg_target_deposit_valid(int ofs, int len); #define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c index b865b4b662..7ac8e45485 100644 --- a/tcg/i386/tcg-target.c +++ b/tcg/i386/tcg-target.c @@ -1026,39 +1026,33 @@ static void tcg_out_jmp(TCGContext *s, uintptr_t dest) /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static const void * const qemu_ld_helpers[4] = { - helper_ret_ldub_mmu, - helper_ret_lduw_mmu, - helper_ret_ldul_mmu, - helper_ret_ldq_mmu, +static const void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static const void * const qemu_st_helpers[4] = { - helper_ret_stb_mmu, - helper_ret_stw_mmu, - helper_ret_stl_mmu, - helper_ret_stq_mmu, +static const void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, }; -static void add_qemu_ldst_label(TCGContext *s, - int is_ld, - int opc, - int data_reg, - int data_reg2, - int addrlo_reg, - int addrhi_reg, - int mem_index, - uint8_t *raddr, - uint8_t **label_ptr); - /* Perform the TLB load and compare. Inputs: - ADDRLO_IDX contains the index into ARGS of the low part of the - address; the high part of the address is at ADDR_LOW_IDX+1. + ADDRLO and ADDRHI contain the low and high part of the address. MEM_INDEX and S_BITS are the memory context and log2 size of the load. @@ -1076,14 +1070,12 @@ static void add_qemu_ldst_label(TCGContext *s, First argument register is clobbered. 
*/ -static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx, - int mem_index, int s_bits, - const TCGArg *args, +static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, + int mem_index, TCGMemOp s_bits, uint8_t **label_ptr, int which) { - const int addrlo = args[addrlo_idx]; - const int r0 = TCG_REG_L0; - const int r1 = TCG_REG_L1; + const TCGReg r0 = TCG_REG_L0; + const TCGReg r1 = TCG_REG_L1; TCGType ttype = TCG_TYPE_I32; TCGType htype = TCG_TYPE_I32; int trexw = 0, hrexw = 0; @@ -1132,7 +1124,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx, if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { /* cmp 4(r0), addrhi */ - tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4); + tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4); /* jne slow_path */ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); @@ -1146,6 +1138,182 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx, tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0, offsetof(CPUTLBEntry, addend) - which); } + +/* + * Record the context of a call to the out of line helper code for the slow path + * for a load or store, so that we can later generate the correct helper code + */ +static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + int mem_index, uint8_t *raddr, + uint8_t **label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->opc = opc; + label->datalo_reg = datalo; + label->datahi_reg = datahi; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + label->mem_index = mem_index; + label->raddr = raddr; + label->label_ptr[0] = label_ptr[0]; + if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + label->label_ptr[1] = label_ptr[1]; + } +} + +/* + * Generate code for the slow path for a load at the end of block + */ +static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOp opc = l->opc; + TCGReg data_reg; + uint8_t **label_ptr = &l->label_ptr[0]; + + /* resolve label address */ + *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4); + if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4); + } + + if (TCG_TARGET_REG_BITS == 32) { + int ofs = 0; + + tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); + ofs += 4; + + tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); + ofs += 4; + + if (TARGET_LONG_BITS == 64) { + tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); + ofs += 4; + } + + tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); + ofs += 4; + + tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr); + } else { + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + /* The second argument is already loaded with addrlo. */ + tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], + l->mem_index); + tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3], + (uintptr_t)l->raddr); + } + + tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN]); + + data_reg = l->datalo_reg; + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW); + break; + case MO_SW: + tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW); + break; +#if TCG_TARGET_REG_BITS == 64 + case MO_SL: + tcg_out_ext32s(s, data_reg, TCG_REG_EAX); + break; +#endif + case MO_UB: + case MO_UW: + /* Note that the helpers have zero-extended to tcg_target_long. 
*/ + case MO_UL: + tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); + break; + case MO_Q: + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); + } else if (data_reg == TCG_REG_EDX) { + /* xchg %edx, %eax */ + tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); + tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX); + } else { + tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); + tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX); + } + break; + default: + tcg_abort(); + } + + /* Jump to the code corresponding to next IR of qemu_st */ + tcg_out_jmp(s, (uintptr_t)l->raddr); +} + +/* + * Generate code for the slow path for a store at the end of block + */ +static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOp opc = l->opc; + TCGMemOp s_bits = opc & MO_SIZE; + uint8_t **label_ptr = &l->label_ptr[0]; + TCGReg retaddr; + + /* resolve label address */ + *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4); + if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { + *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4); + } + + if (TCG_TARGET_REG_BITS == 32) { + int ofs = 0; + + tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); + ofs += 4; + + tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); + ofs += 4; + + if (TARGET_LONG_BITS == 64) { + tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); + ofs += 4; + } + + tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs); + ofs += 4; + + if (s_bits == MO_64) { + tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs); + ofs += 4; + } + + tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); + ofs += 4; + + retaddr = TCG_REG_EAX; + tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr); + tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs); + } else { + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + /* The second argument is already loaded with addrlo. */ + tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), + tcg_target_call_iarg_regs[2], l->datalo_reg); + tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], + l->mem_index); + + if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { + retaddr = tcg_target_call_iarg_regs[4]; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + } else { + retaddr = TCG_REG_RAX; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0); + } + } + + /* "Tail call" to the helper, with the return address back inline. 
*/ + tcg_out_push(s, retaddr); + tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[opc]); +} #elif defined(__x86_64__) && defined(__linux__) # include <asm/prctl.h> # include <sys/prctl.h> @@ -1164,28 +1332,26 @@ static inline void setup_guest_base_seg(void) static inline void setup_guest_base_seg(void) { } #endif /* SOFTMMU */ -static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi, - int base, intptr_t ofs, int seg, int sizeop) +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, intptr_t ofs, int seg, + TCGMemOp memop) { -#ifdef TARGET_WORDS_BIGENDIAN - const int bswap = 1; -#else - const int bswap = 0; -#endif - switch (sizeop) { - case 0: + const TCGMemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SSIZE) { + case MO_UB: tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs); break; - case 0 | 4: + case MO_SB: tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs); break; - case 1: + case MO_UW: tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs); if (bswap) { tcg_out_rolw_8(s, datalo); } break; - case 1 | 4: + case MO_SW: if (bswap) { tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs); tcg_out_rolw_8(s, datalo); @@ -1195,14 +1361,14 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi, datalo, base, ofs); } break; - case 2: + case MO_UL: tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs); if (bswap) { tcg_out_bswap32(s, datalo); } break; #if TCG_TARGET_REG_BITS == 64 - case 2 | 4: + case MO_SL: if (bswap) { tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs); tcg_out_bswap32(s, datalo); @@ -1212,7 +1378,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi, } break; #endif - case 3: + case MO_Q: if (TCG_TARGET_REG_BITS == 64) { tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg, datalo, base, ofs); @@ -1250,48 +1416,40 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi, /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and EAX. It will be useful once fixed registers globals are less common. */ -static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, - int opc) +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { - int data_reg, data_reg2 = 0; - int addrlo_idx; + TCGReg datalo, datahi, addrlo; + TCGReg addrhi __attribute__((unused)); + TCGMemOp opc; #if defined(CONFIG_SOFTMMU) - int mem_index, s_bits; + int mem_index; + TCGMemOp s_bits; uint8_t *label_ptr[2]; #endif - data_reg = args[0]; - addrlo_idx = 1; - if (TCG_TARGET_REG_BITS == 32 && opc == 3) { - data_reg2 = args[1]; - addrlo_idx = 2; - } + datalo = *args++; + datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0); + opc = *args++; #if defined(CONFIG_SOFTMMU) - mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)]; - s_bits = opc & 3; + mem_index = *args++; + s_bits = opc & MO_SIZE; - tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args, + tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits, label_ptr, offsetof(CPUTLBEntry, addr_read)); /* TLB Hit. 
*/ - tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc); + tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc); /* Record the current context of a load into ldst label */ - add_qemu_ldst_label(s, - 1, - opc, - data_reg, - data_reg2, - args[addrlo_idx], - args[addrlo_idx + 1], - mem_index, - s->code_ptr, - label_ptr); + add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi, + mem_index, s->code_ptr, label_ptr); #else { int32_t offset = GUEST_BASE; - int base = args[addrlo_idx]; + TCGReg base = addrlo; int seg = 0; /* ??? We assume all operations have left us with register contents @@ -1309,32 +1467,35 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, offset = 0; } - tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc); + tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc); } #endif } -static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi, - int base, intptr_t ofs, int seg, - int sizeop) +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, intptr_t ofs, int seg, + TCGMemOp memop) { -#ifdef TARGET_WORDS_BIGENDIAN - const int bswap = 1; -#else - const int bswap = 0; -#endif + const TCGMemOp bswap = memop & MO_BSWAP; + /* ??? Ideally we wouldn't need a scratch register. For user-only, we could perform the bswap twice to restore the original value instead of moving to the scratch. But as it is, the L constraint means that TCG_REG_L0 is definitely free here. */ - const int scratch = TCG_REG_L0; + const TCGReg scratch = TCG_REG_L0; - switch (sizeop) { - case 0: + switch (memop & MO_SIZE) { + case MO_8: + /* In 32-bit mode, 8-byte stores can only happen from [abcd]x. + Use the scratch register if necessary. */ + if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) { + tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); + datalo = scratch; + } tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg, datalo, base, ofs); break; - case 1: + case MO_16: if (bswap) { tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); tcg_out_rolw_8(s, scratch); @@ -1343,7 +1504,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi, tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg, datalo, base, ofs); break; - case 2: + case MO_32: if (bswap) { tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); tcg_out_bswap32(s, scratch); @@ -1351,7 +1512,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi, } tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs); break; - case 3: + case MO_64: if (TCG_TARGET_REG_BITS == 64) { if (bswap) { tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo); @@ -1377,48 +1538,40 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi, } } -static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, - int opc) +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { - int data_reg, data_reg2 = 0; - int addrlo_idx; + TCGReg datalo, datahi, addrlo; + TCGReg addrhi __attribute__((unused)); + TCGMemOp opc; #if defined(CONFIG_SOFTMMU) - int mem_index, s_bits; + int mem_index; + TCGMemOp s_bits; uint8_t *label_ptr[2]; #endif - data_reg = args[0]; - addrlo_idx = 1; - if (TCG_TARGET_REG_BITS == 32 && opc == 3) { - data_reg2 = args[1]; - addrlo_idx = 2; - } + datalo = *args++; + datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0); + addrlo = *args++; + addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? 
*args++ : 0); + opc = *args++; #if defined(CONFIG_SOFTMMU) - mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)]; - s_bits = opc; + mem_index = *args++; + s_bits = opc & MO_SIZE; - tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args, + tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits, label_ptr, offsetof(CPUTLBEntry, addr_write)); /* TLB Hit. */ - tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc); + tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc); /* Record the current context of a store into ldst label */ - add_qemu_ldst_label(s, - 0, - opc, - data_reg, - data_reg2, - args[addrlo_idx], - args[addrlo_idx + 1], - mem_index, - s->code_ptr, - label_ptr); + add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi, + mem_index, s->code_ptr, label_ptr); #else { int32_t offset = GUEST_BASE; - int base = args[addrlo_idx]; + TCGReg base = addrlo; int seg = 0; /* ??? We assume all operations have left us with register contents @@ -1436,195 +1589,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, offset = 0; } - tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc); + tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc); } #endif } -#if defined(CONFIG_SOFTMMU) -/* - * Record the context of a call to the out of line helper code for the slow path - * for a load or store, so that we can later generate the correct helper code - */ -static void add_qemu_ldst_label(TCGContext *s, - int is_ld, - int opc, - int data_reg, - int data_reg2, - int addrlo_reg, - int addrhi_reg, - int mem_index, - uint8_t *raddr, - uint8_t **label_ptr) -{ - TCGLabelQemuLdst *label = new_ldst_label(s); - - label->is_ld = is_ld; - label->opc = opc; - label->datalo_reg = data_reg; - label->datahi_reg = data_reg2; - label->addrlo_reg = addrlo_reg; - label->addrhi_reg = addrhi_reg; - label->mem_index = mem_index; - label->raddr = raddr; - label->label_ptr[0] = label_ptr[0]; - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - label->label_ptr[1] = label_ptr[1]; - } -} - -/* - * Generate code for the slow path for a load at the end of block - */ -static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - int opc = l->opc; - int s_bits = opc & 3; - TCGReg data_reg; - uint8_t **label_ptr = &l->label_ptr[0]; - - /* resolve label address */ - *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4); - } - - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; - - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); - ofs += 4; - - tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr); - } else { - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - /* The second argument is already loaded with addrlo. 
*/ - tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], - l->mem_index); - tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3], - (uintptr_t)l->raddr); - } - - tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[s_bits]); - - data_reg = l->datalo_reg; - switch(opc) { - case 0 | 4: - tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW); - break; - case 1 | 4: - tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW); - break; -#if TCG_TARGET_REG_BITS == 64 - case 2 | 4: - tcg_out_ext32s(s, data_reg, TCG_REG_EAX); - break; -#endif - case 0: - case 1: - /* Note that the helpers have zero-extended to tcg_target_long. */ - case 2: - tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); - break; - case 3: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); - } else if (data_reg == TCG_REG_EDX) { - /* xchg %edx, %eax */ - tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX); - } else { - tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); - tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX); - } - break; - default: - tcg_abort(); - } - - /* Jump to the code corresponding to next IR of qemu_st */ - tcg_out_jmp(s, (uintptr_t)l->raddr); -} - -/* - * Generate code for the slow path for a store at the end of block - */ -static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) -{ - int opc = l->opc; - int s_bits = opc & 3; - uint8_t **label_ptr = &l->label_ptr[0]; - TCGReg retaddr; - - /* resolve label address */ - *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4); - if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { - *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4); - } - - if (TCG_TARGET_REG_BITS == 32) { - int ofs = 0; - - tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); - ofs += 4; - - tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (TARGET_LONG_BITS == 64) { - tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs); - ofs += 4; - - if (opc == 3) { - tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs); - ofs += 4; - } - - tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); - ofs += 4; - - retaddr = TCG_REG_EAX; - tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr); - tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs); - } else { - tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); - /* The second argument is already loaded with addrlo. */ - tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32), - tcg_target_call_iarg_regs[2], l->datalo_reg); - tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], - l->mem_index); - - if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { - retaddr = tcg_target_call_iarg_regs[4]; - tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - } else { - retaddr = TCG_REG_RAX; - tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); - tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0); - } - } - - /* "Tail call" to the helper, with the return address back inline. 
*/ - tcg_out_push(s, retaddr); - tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]); -} -#endif /* CONFIG_SOFTMMU */ - static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { @@ -1850,40 +1819,18 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_ext16u(s, args[0], args[1]); break; - case INDEX_op_qemu_ld8u: + case INDEX_op_qemu_ld_i32: tcg_out_qemu_ld(s, args, 0); break; - case INDEX_op_qemu_ld8s: - tcg_out_qemu_ld(s, args, 0 | 4); - break; - case INDEX_op_qemu_ld16u: + case INDEX_op_qemu_ld_i64: tcg_out_qemu_ld(s, args, 1); break; - case INDEX_op_qemu_ld16s: - tcg_out_qemu_ld(s, args, 1 | 4); - break; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_qemu_ld32u: -#endif - case INDEX_op_qemu_ld32: - tcg_out_qemu_ld(s, args, 2); - break; - case INDEX_op_qemu_ld64: - tcg_out_qemu_ld(s, args, 3); - break; - - case INDEX_op_qemu_st8: + case INDEX_op_qemu_st_i32: tcg_out_qemu_st(s, args, 0); break; - case INDEX_op_qemu_st16: + case INDEX_op_qemu_st_i64: tcg_out_qemu_st(s, args, 1); break; - case INDEX_op_qemu_st32: - tcg_out_qemu_st(s, args, 2); - break; - case INDEX_op_qemu_st64: - tcg_out_qemu_st(s, args, 3); - break; OP_32_64(mulu2): tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]); @@ -1942,9 +1889,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); } break; - case INDEX_op_qemu_ld32s: - tcg_out_qemu_ld(s, args, 2 | 4); - break; case INDEX_op_brcond_i64: tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1], @@ -2109,43 +2053,20 @@ static const TCGTargetOpDef x86_op_defs[] = { #endif #if TCG_TARGET_REG_BITS == 64 - { INDEX_op_qemu_ld8u, { "r", "L" } }, - { INDEX_op_qemu_ld8s, { "r", "L" } }, - { INDEX_op_qemu_ld16u, { "r", "L" } }, - { INDEX_op_qemu_ld16s, { "r", "L" } }, - { INDEX_op_qemu_ld32, { "r", "L" } }, - { INDEX_op_qemu_ld32u, { "r", "L" } }, - { INDEX_op_qemu_ld32s, { "r", "L" } }, - { INDEX_op_qemu_ld64, { "r", "L" } }, - - { INDEX_op_qemu_st8, { "L", "L" } }, - { INDEX_op_qemu_st16, { "L", "L" } }, - { INDEX_op_qemu_st32, { "L", "L" } }, - { INDEX_op_qemu_st64, { "L", "L" } }, + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L" } }, #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS - { INDEX_op_qemu_ld8u, { "r", "L" } }, - { INDEX_op_qemu_ld8s, { "r", "L" } }, - { INDEX_op_qemu_ld16u, { "r", "L" } }, - { INDEX_op_qemu_ld16s, { "r", "L" } }, - { INDEX_op_qemu_ld32, { "r", "L" } }, - { INDEX_op_qemu_ld64, { "r", "r", "L" } }, - - { INDEX_op_qemu_st8, { "cb", "L" } }, - { INDEX_op_qemu_st16, { "L", "L" } }, - { INDEX_op_qemu_st32, { "L", "L" } }, - { INDEX_op_qemu_st64, { "L", "L", "L" } }, + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "r", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L", "L" } }, #else - { INDEX_op_qemu_ld8u, { "r", "L", "L" } }, - { INDEX_op_qemu_ld8s, { "r", "L", "L" } }, - { INDEX_op_qemu_ld16u, { "r", "L", "L" } }, - { INDEX_op_qemu_ld16s, { "r", "L", "L" } }, - { INDEX_op_qemu_ld32, { "r", "L", "L" } }, - { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } }, - - { INDEX_op_qemu_st8, { "cb", "L", "L" } }, - { INDEX_op_qemu_st16, { "L", "L", "L" } }, - { INDEX_op_qemu_st32, { "L", "L", "L" } }, - { INDEX_op_qemu_st64, { "L", "L", "L", "L" } }, + { INDEX_op_qemu_ld_i32, { "r", "L", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L", "L" } }, + { 
INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } }, #endif { -1 }, }; diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index fa7d966461..92c0fcd36d 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -130,7 +130,7 @@ typedef enum { #define TCG_TARGET_HAS_mulsh_i64 0 #endif -#define TCG_TARGET_HAS_new_ldst 0 +#define TCG_TARGET_HAS_new_ldst 1 #define TCG_TARGET_deposit_i32_valid(ofs, len) \ (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \ diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c index 68778c2bc4..dc2c2df890 100644 --- a/tcg/ppc/tcg-target.c +++ b/tcg/ppc/tcg-target.c @@ -525,7 +525,7 @@ static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg, static void add_qemu_ldst_label (TCGContext *s, int is_ld, - int opc, + TCGMemOp opc, int data_reg, int data_reg2, int addrlo_reg, @@ -550,32 +550,38 @@ static void add_qemu_ldst_label (TCGContext *s, /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static const void * const qemu_ld_helpers[4] = { - helper_ret_ldub_mmu, - helper_ret_lduw_mmu, - helper_ret_ldul_mmu, - helper_ret_ldq_mmu, +static const void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static const void * const qemu_st_helpers[4] = { - helper_ret_stb_mmu, - helper_ret_stw_mmu, - helper_ret_stl_mmu, - helper_ret_stq_mmu, +static const void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, }; -static void *ld_trampolines[4]; -static void *st_trampolines[4]; +static void *ld_trampolines[16]; +static void *st_trampolines[16]; /* Perform the TLB load and compare. Branches to the slow path, placing the address of the branch in *LABEL_PTR. Loads the addend of the TLB into R0. Clobbers R1 and R2. */ static void tcg_out_tlb_check(TCGContext *s, TCGReg r0, TCGReg r1, TCGReg r2, - TCGReg addrlo, TCGReg addrhi, int s_bits, + TCGReg addrlo, TCGReg addrhi, TCGMemOp s_bits, int mem_index, int is_load, uint8_t **label_ptr) { int cmp_off = @@ -647,50 +653,44 @@ static void tcg_out_tlb_check(TCGContext *s, TCGReg r0, TCGReg r1, TCGReg r2, } #endif -static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc) +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { - TCGReg addrlo, datalo, datahi, rbase; - int bswap; + TCGReg addrlo, datalo, datahi, rbase, addrhi __attribute__((unused)); + TCGMemOp opc, bswap; #ifdef CONFIG_SOFTMMU int mem_index; - TCGReg addrhi; uint8_t *label_ptr; #endif datalo = *args++; - datahi = (opc == 3 ? *args++ : 0); + datahi = (is64 ? *args++ : 0); addrlo = *args++; + addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0); + opc = *args++; + bswap = opc & MO_BSWAP; #ifdef CONFIG_SOFTMMU - addrhi = (TARGET_LONG_BITS == 64 ? 
@@ -647,50 +653,44 @@ static void tcg_out_tlb_check(TCGContext *s, TCGReg r0, TCGReg r1, TCGReg r2,
 }
 #endif
 
-static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 {
-    TCGReg addrlo, datalo, datahi, rbase;
-    int bswap;
+    TCGReg addrlo, datalo, datahi, rbase, addrhi __attribute__((unused));
+    TCGMemOp opc, bswap;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
-    TCGReg addrhi;
     uint8_t *label_ptr;
 #endif
 
     datalo = *args++;
-    datahi = (opc == 3 ? *args++ : 0);
+    datahi = (is64 ? *args++ : 0);
     addrlo = *args++;
+    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
+    opc = *args++;
+    bswap = opc & MO_BSWAP;
 
 #ifdef CONFIG_SOFTMMU
-    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
     mem_index = *args;
-
     tcg_out_tlb_check(s, TCG_REG_R3, TCG_REG_R4, TCG_REG_R0, addrlo,
-                      addrhi, opc & 3, mem_index, 0, &label_ptr);
+                      addrhi, opc & MO_SIZE, mem_index, 0, &label_ptr);
     rbase = TCG_REG_R3;
 #else  /* !CONFIG_SOFTMMU */
     rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
 #endif
 
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 0;
-#else
-    bswap = 1;
-#endif
-
-    switch (opc) {
+    switch (opc & MO_SSIZE) {
     default:
-    case 0:
+    case MO_UB:
        tcg_out32(s, LBZX | TAB(datalo, rbase, addrlo));
         break;
-    case 0|4:
+    case MO_SB:
         tcg_out32(s, LBZX | TAB(datalo, rbase, addrlo));
         tcg_out32(s, EXTSB | RA(datalo) | RS(datalo));
         break;
-    case 1:
+    case MO_UW:
         tcg_out32(s, (bswap ? LHBRX : LHZX) | TAB(datalo, rbase, addrlo));
         break;
-    case 1|4:
+    case MO_SW:
         if (bswap) {
             tcg_out32(s, LHBRX | TAB(datalo, rbase, addrlo));
             tcg_out32(s, EXTSH | RA(datalo) | RS(datalo));
@@ -698,10 +698,10 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
             tcg_out32(s, LHAX | TAB(datalo, rbase, addrlo));
         }
         break;
-    case 2:
+    case MO_UL:
         tcg_out32(s, (bswap ? LWBRX : LWZX) | TAB(datalo, rbase, addrlo));
         break;
-    case 3:
+    case MO_Q:
         if (bswap) {
             tcg_out32(s, ADDI | RT(TCG_REG_R0) | RA(addrlo) | 4);
             tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
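Note how the rewritten fast path above no longer consults TARGET_WORDS_BIGENDIAN at compile time: byte order rides along in the memop and the emitter just masks. Here is a compilable sketch of that decode shape; the MO_* values are assumed as in the earlier sketch, and the emitted instructions are stubbed out as comments.

/* Sketch of the fast-path decode: one mask for the byte-swap decision,
 * one switch over size-plus-sign.  Real code emits LBZX/LHZX/LHBRX etc.
 * where the comments stand. */
enum { MO_SIZE = 3, MO_SIGN = 4, MO_BSWAP = 8,
       MO_UB = 0, MO_SB = 4, MO_UW = 1, MO_SW = 5,
       MO_SSIZE = MO_SIZE | MO_SIGN };

static void emit_load(int opc)
{
    int bswap = opc & MO_BSWAP;   /* replaces the #ifdef on target order */

    switch (opc & MO_SSIZE) {
    case MO_UB:  /* emit LBZX */                      break;
    case MO_SB:  /* emit LBZX, then EXTSB */          break;
    case MO_UW:  /* emit bswap ? LHBRX : LHZX */      break;
    case MO_SW:  /* bswap ? (LHBRX, EXTSH) : LHAX */  break;
    default:     /* MO_UL, MO_Q, ... */               break;
    }
    (void)bswap;
}

int main(void)
{
    emit_load(MO_SW | MO_BSWAP);
    return 0;
}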
@@ -725,47 +725,44 @@ static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
 #endif
 }
 
-static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 {
-    TCGReg addrlo, datalo, datahi, rbase;
-    int bswap;
+    TCGReg addrlo, datalo, datahi, rbase, addrhi __attribute__((unused));
+    TCGMemOp opc, bswap, s_bits;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
-    TCGReg addrhi;
     uint8_t *label_ptr;
 #endif
 
     datalo = *args++;
-    datahi = (opc == 3 ? *args++ : 0);
+    datahi = (is64 ? *args++ : 0);
     addrlo = *args++;
+    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
+    opc = *args++;
+    bswap = opc & MO_BSWAP;
+    s_bits = opc & MO_SIZE;
 
 #ifdef CONFIG_SOFTMMU
-    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
     mem_index = *args;
-
     tcg_out_tlb_check(s, TCG_REG_R3, TCG_REG_R4, TCG_REG_R0, addrlo,
-                      addrhi, opc & 3, mem_index, 0, &label_ptr);
+                      addrhi, s_bits, mem_index, 0, &label_ptr);
     rbase = TCG_REG_R3;
 #else  /* !CONFIG_SOFTMMU */
     rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
 #endif
 
-#ifdef TARGET_WORDS_BIGENDIAN
-    bswap = 0;
-#else
-    bswap = 1;
-#endif
-    switch (opc) {
-    case 0:
+    switch (s_bits) {
+    case MO_8:
         tcg_out32(s, STBX | SAB(datalo, rbase, addrlo));
         break;
-    case 1:
+    case MO_16:
         tcg_out32(s, (bswap ? STHBRX : STHX) | SAB(datalo, rbase, addrlo));
         break;
-    case 2:
+    case MO_32:
+    default:
         tcg_out32(s, (bswap ? STWBRX : STWX) | SAB(datalo, rbase, addrlo));
         break;
-    case 3:
+    case MO_64:
         if (bswap) {
             tcg_out32(s, ADDI | RT(TCG_REG_R0) | RA(addrlo) | 4);
             tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
@@ -791,6 +788,7 @@ static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGReg ir, datalo, datahi;
+    TCGMemOp opc = l->opc;
 
     reloc_pc14 (l->label_ptr[0], (uintptr_t)s->code_ptr);
@@ -806,22 +804,20 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     }
     tcg_out_movi(s, TCG_TYPE_I32, ir++, l->mem_index);
     tcg_out32(s, MFSPR | RT(ir++) | LR);
-    tcg_out_b(s, LK, (uintptr_t)ld_trampolines[l->opc & 3]);
+    tcg_out_b(s, LK, (uintptr_t)ld_trampolines[opc & ~MO_SIGN]);
 
     datalo = l->datalo_reg;
-    switch (l->opc) {
-    case 0|4:
+    switch (opc & MO_SSIZE) {
+    case MO_SB:
         tcg_out32(s, EXTSB | RA(datalo) | RS(TCG_REG_R3));
         break;
-    case 1|4:
+    case MO_SW:
         tcg_out32(s, EXTSH | RA(datalo) | RS(TCG_REG_R3));
         break;
-    case 0:
-    case 1:
-    case 2:
+    default:
         tcg_out_mov(s, TCG_TYPE_I32, datalo, TCG_REG_R3);
         break;
-    case 3:
+    case MO_Q:
         datahi = l->datahi_reg;
         if (datalo != TCG_REG_R3) {
             tcg_out_mov(s, TCG_TYPE_I32, datalo, TCG_REG_R4);
@@ -842,6 +838,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGReg ir, datalo;
+    TCGMemOp opc = l->opc;
 
     reloc_pc14 (l->label_ptr[0], (tcg_target_long) s->code_ptr);
@@ -857,19 +854,19 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     }
 
     datalo = l->datalo_reg;
-    switch (l->opc) {
-    case 0:
+    switch (opc & MO_SIZE) {
+    case MO_8:
         tcg_out32(s, (RLWINM | RA (ir) | RS (datalo)
                       | SH (0) | MB (24) | ME (31)));
         break;
-    case 1:
+    case MO_16:
         tcg_out32(s, (RLWINM | RA (ir) | RS (datalo)
                       | SH (0) | MB (16) | ME (31)));
         break;
-    case 2:
+    default:
         tcg_out_mov(s, TCG_TYPE_I32, ir, datalo);
         break;
-    case 3:
+    case MO_64:
 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
         ir |= 1;
 #endif
@@ -881,7 +878,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     tcg_out_movi(s, TCG_TYPE_I32, ir++, l->mem_index);
     tcg_out32(s, MFSPR | RT(ir++) | LR);
-    tcg_out_b(s, LK, (uintptr_t)st_trampolines[l->opc]);
+    tcg_out_b(s, LK, (uintptr_t)st_trampolines[opc]);
     tcg_out_b(s, 0, (uintptr_t)l->raddr);
 }
 #endif
@@ -956,12 +953,15 @@ static void tcg_target_qemu_prologue (TCGContext *s)
     tcg_out32 (s, BCLR | BO_ALWAYS);
 
 #ifdef CONFIG_SOFTMMU
-    for (i = 0; i < 4; ++i) {
-        ld_trampolines[i] = s->code_ptr;
-        emit_ldst_trampoline (s, qemu_ld_helpers[i]);
-
-        st_trampolines[i] = s->code_ptr;
-        emit_ldst_trampoline (s, qemu_st_helpers[i]);
+    for (i = 0; i < 16; ++i) {
+        if (qemu_ld_helpers[i]) {
+            ld_trampolines[i] = s->code_ptr;
+            emit_ldst_trampoline(s, qemu_ld_helpers[i]);
+        }
+        if (qemu_st_helpers[i]) {
+            st_trampolines[i] = s->code_ptr;
+            emit_ldst_trampoline(s, qemu_st_helpers[i]);
+        }
     }
 #endif
 }
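Since the sixteen-entry helper tables are sparse, the prologue loop above has to skip the empty slots; emitting a trampoline for a NULL helper would be garbage code. A small compilable sketch of that guarded emission, with emit_trampoline() as a hypothetical stand-in:

#include <stddef.h>

/* Only memop slots that actually have a helper get a trampoline; the
 * rest stay NULL and are never reached, because the fast path only
 * branches to trampolines for memops it actually emits. */
static void *ld_trampolines[16];

static void *emit_trampoline(const void *helper)
{
    /* stand-in: real code emits a stub that tail-calls the helper */
    return (void *)helper;
}

static void emit_all_trampolines(const void *const ld_helpers[16])
{
    for (int i = 0; i < 16; ++i) {
        if (ld_helpers[i]) {
            ld_trampolines[i] = emit_trampoline(ld_helpers[i]);
        }
    }
}

static void dummy_helper(void) {}

static const void *const ld_helpers[16] = {
    [0] = dummy_helper,     /* most slots intentionally left NULL */
};

int main(void)
{
    emit_all_trampolines(ld_helpers);
    return 0;
}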
@@ -1706,36 +1706,18 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out32 (s, NOR | SAB (args[1], args[0], args[1]));
         break;
 
-    case INDEX_op_qemu_ld8u:
+    case INDEX_op_qemu_ld_i32:
         tcg_out_qemu_ld(s, args, 0);
         break;
-    case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, args, 0 | 4);
-        break;
-    case INDEX_op_qemu_ld16u:
+    case INDEX_op_qemu_ld_i64:
         tcg_out_qemu_ld(s, args, 1);
         break;
-    case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, args, 1 | 4);
-        break;
-    case INDEX_op_qemu_ld32:
-        tcg_out_qemu_ld(s, args, 2);
-        break;
-    case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, args, 3);
-        break;
-    case INDEX_op_qemu_st8:
+    case INDEX_op_qemu_st_i32:
         tcg_out_qemu_st(s, args, 0);
         break;
-    case INDEX_op_qemu_st16:
+    case INDEX_op_qemu_st_i64:
         tcg_out_qemu_st(s, args, 1);
         break;
-    case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, args, 2);
-        break;
-    case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, args, 3);
-        break;
 
     case INDEX_op_ext8s_i32:
         tcg_out32 (s, EXTSB | RS (args[1]) | RA (args[0]));
@@ -1919,29 +1901,15 @@ static const TCGTargetOpDef ppc_op_defs[] = {
     { INDEX_op_bswap32_i32, { "r", "r" } },
 
 #if TARGET_LONG_BITS == 32
-    { INDEX_op_qemu_ld8u, { "r", "L" } },
-    { INDEX_op_qemu_ld8s, { "r", "L" } },
-    { INDEX_op_qemu_ld16u, { "r", "L" } },
-    { INDEX_op_qemu_ld16s, { "r", "L" } },
-    { INDEX_op_qemu_ld32, { "r", "L" } },
-    { INDEX_op_qemu_ld64, { "L", "L", "L" } },
-
-    { INDEX_op_qemu_st8, { "K", "K" } },
-    { INDEX_op_qemu_st16, { "K", "K" } },
-    { INDEX_op_qemu_st32, { "K", "K" } },
-    { INDEX_op_qemu_st64, { "M", "M", "M" } },
+    { INDEX_op_qemu_ld_i32, { "r", "L" } },
+    { INDEX_op_qemu_ld_i64, { "L", "L", "L" } },
+    { INDEX_op_qemu_st_i32, { "K", "K" } },
+    { INDEX_op_qemu_st_i64, { "M", "M", "M" } },
 #else
-    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
-    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },
-
-    { INDEX_op_qemu_st8, { "K", "K", "K" } },
-    { INDEX_op_qemu_st16, { "K", "K", "K" } },
-    { INDEX_op_qemu_st32, { "K", "K", "K" } },
-    { INDEX_op_qemu_st64, { "M", "M", "M", "M" } },
+    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
+    { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } },
+    { INDEX_op_qemu_st_i32, { "K", "K", "K" } },
+    { INDEX_op_qemu_st_i64, { "M", "M", "M", "M" } },
 #endif
 
     { INDEX_op_ext8s_i32, { "r", "r" } },
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index e3ac6296e0..e3395e301c 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -99,7 +99,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
 
 #define TCG_AREG0 TCG_REG_R27
 
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 12c1f61b03..6109d862db 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -809,22 +809,28 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
     }
 }
 
-static const uint32_t qemu_ldx_opc[8] = {
-#ifdef TARGET_WORDS_BIGENDIAN
-    LBZX, LHZX, LWZX, LDX,
-    0, LHAX, LWAX, LDX
-#else
-    LBZX, LHBRX, LWBRX, LDBRX,
-    0, 0, 0, LDBRX,
-#endif
+static const uint32_t qemu_ldx_opc[16] = {
+    [MO_UB] = LBZX,
+    [MO_UW] = LHZX,
+    [MO_UL] = LWZX,
+    [MO_Q] = LDX,
+    [MO_SW] = LHAX,
+    [MO_SL] = LWAX,
+    [MO_BSWAP | MO_UB] = LBZX,
+    [MO_BSWAP | MO_UW] = LHBRX,
+    [MO_BSWAP | MO_UL] = LWBRX,
+    [MO_BSWAP | MO_Q] = LDBRX,
 };
 
-static const uint32_t qemu_stx_opc[4] = {
-#ifdef TARGET_WORDS_BIGENDIAN
-    STBX, STHX, STWX, STDX
-#else
-    STBX, STHBRX, STWBRX, STDBRX,
-#endif
+static const uint32_t qemu_stx_opc[16] = {
+    [MO_UB] = STBX,
+    [MO_UW] = STHX,
+    [MO_UL] = STWX,
+    [MO_Q] = STDX,
+    [MO_BSWAP | MO_UB] = STBX,
+    [MO_BSWAP | MO_UW] = STHBRX,
+    [MO_BSWAP | MO_UL] = STWBRX,
+    [MO_BSWAP | MO_Q] = STDBRX,
 };
 
 static const uint32_t qemu_exts_opc[4] = {
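The ppc64 tables above make the hardware's asymmetry explicit: every unsigned access has a direct instruction in both byte orders, but there is no byte-reversed sign-extending load, so the [MO_BSWAP | MO_SW] and [MO_BSWAP | MO_SL] slots are simply absent and read as zero. A sketch of that shape follows; the opcode numbers below are placeholders, not real Power encodings.

#include <stdint.h>

/* Assumed memop values as before; opcode values are placeholders. */
enum { MO_SIGN = 4, MO_BSWAP = 8,
       MO_UW = 1, MO_SW = MO_SIGN | MO_UW };

static const uint32_t ldx_opc[16] = {
    [MO_UW]            = 0x01,  /* think LHZX */
    [MO_SW]            = 0x02,  /* think LHAX */
    [MO_BSWAP | MO_UW] = 0x03,  /* think LHBRX */
    /* [MO_BSWAP | MO_SW] left 0: no such instruction exists, so the
     * emitter falls back to LHBRX plus an explicit sign-extend. */
};

int main(void)
{
    /* A zero entry signals "no single instruction for this memop". */
    return ldx_opc[MO_BSWAP | MO_SW] == 0 ? 0 : 1;
}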
@@ -835,28 +841,34 @@
 
 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
  *                                 int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_ld_helpers[4] = {
-    helper_ret_ldub_mmu,
-    helper_ret_lduw_mmu,
-    helper_ret_ldul_mmu,
-    helper_ret_ldq_mmu,
+static const void * const qemu_ld_helpers[16] = {
+    [MO_UB] = helper_ret_ldub_mmu,
+    [MO_LEUW] = helper_le_lduw_mmu,
+    [MO_LEUL] = helper_le_ldul_mmu,
+    [MO_LEQ] = helper_le_ldq_mmu,
+    [MO_BEUW] = helper_be_lduw_mmu,
+    [MO_BEUL] = helper_be_ldul_mmu,
+    [MO_BEQ] = helper_be_ldq_mmu,
 };
 
 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
  *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_st_helpers[4] = {
-    helper_ret_stb_mmu,
-    helper_ret_stw_mmu,
-    helper_ret_stl_mmu,
-    helper_ret_stq_mmu,
+static const void * const qemu_st_helpers[16] = {
+    [MO_UB] = helper_ret_stb_mmu,
+    [MO_LEUW] = helper_le_stw_mmu,
+    [MO_LEUL] = helper_le_stl_mmu,
+    [MO_LEQ] = helper_le_stq_mmu,
+    [MO_BEUW] = helper_be_stw_mmu,
+    [MO_BEUL] = helper_be_stl_mmu,
+    [MO_BEQ] = helper_be_stq_mmu,
 };
 
 /* Perform the TLB load and compare.  Places the result of the
    comparison in CR7, loads the addend of the TLB into R3, and returns
    the register containing the guest address (zero-extended into R4).
    Clobbers R0 and R2. */
 
-static TCGReg tcg_out_tlb_read(TCGContext *s, int s_bits, TCGReg addr_reg,
+static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, TCGReg addr_reg,
                                int mem_index, bool is_read)
 {
     int cmp_off
@@ -929,7 +941,7 @@
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, int opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 int data_reg, int addr_reg, int mem_index,
                                 uint8_t *raddr, uint8_t *label_ptr)
 {
@@ -946,8 +958,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
 
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    int opc = lb->opc;
-    int s_bits = opc & 3;
+    TCGMemOp opc = lb->opc;
 
     reloc_pc14(lb->label_ptr[0], (uintptr_t)s->code_ptr);
@@ -960,10 +971,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
     tcg_out32(s, MFSPR | RT(TCG_REG_R6) | LR);
-    tcg_out_call(s, (tcg_target_long)qemu_ld_helpers[s_bits], 1);
+    tcg_out_call(s, (tcg_target_long)qemu_ld_helpers[opc & ~MO_SIGN], 1);
 
-    if (opc & 4) {
-        uint32_t insn = qemu_exts_opc[s_bits];
+    if (opc & MO_SIGN) {
+        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
         tcg_out32(s, insn | RA(lb->datalo_reg) | RS(TCG_REG_R3));
     } else {
         tcg_out_mov(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_R3);
@@ -974,7 +985,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    int opc = lb->opc;
+    TCGMemOp opc = lb->opc;
+    TCGMemOp s_bits = opc & MO_SIZE;
 
     reloc_pc14(lb->label_ptr[0], (uintptr_t)s->code_ptr);
@@ -985,7 +997,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);
     tcg_out_rld(s, RLDICL, TCG_REG_R5, lb->datalo_reg,
-                0, 64 - (1 << (3 + opc)));
+                0, 64 - (1 << (3 + s_bits)));
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R6, lb->mem_index);
     tcg_out32(s, MFSPR | RT(TCG_REG_R7) | LR);
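The slow path above leans on a small trick: the call helpers only exist in unsigned flavours, so the table lookup strips MO_SIGN (opc & ~MO_SIGN picks, say, the lduw helper for a signed halfword load) and the sign extension is redone on the returned value via qemu_exts_opc. A compilable sketch of the same logic in plain C, with the helper call stubbed out:

/* MO_* values assumed as in the earlier sketches. */
enum { MO_SIZE = 3, MO_SIGN = 4 };

/* Stand-in for the table-dispatched unsigned load helper. */
static long call_ld_helper(int opc_unsigned)
{
    (void)opc_unsigned;
    return 0x80;                    /* pretend we loaded the byte 0x80 */
}

/* Portable sign-extension of the low (8 << size_log2) bits. */
static long sign_extend(long v, int size_log2)
{
    unsigned long bits = 8UL << size_log2;
    unsigned long m = 1UL << (bits - 1);
    unsigned long x = (unsigned long)v & (m | (m - 1));
    return (long)((x ^ m) - m);
}

int main(void)
{
    int opc = 0 | MO_SIGN;          /* a signed byte load, i.e. "MO_SB" */
    long v = call_ld_helper(opc & ~MO_SIGN);
    if (opc & MO_SIGN) {
        v = sign_extend(v, opc & MO_SIZE);
    }
    return v == -128 ? 0 : 1;       /* 0x80 sign-extends to -128 */
}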
@@ -995,22 +1007,17 @@
 }
 #endif /* SOFTMMU */
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            TCGMemOp opc, int mem_index)
 {
-    TCGReg addr_reg, data_reg, rbase;
-    uint32_t insn, s_bits;
+    TCGReg rbase;
+    uint32_t insn;
+    TCGMemOp s_bits = opc & MO_SIZE;
 #ifdef CONFIG_SOFTMMU
-    int mem_index;
     void *label_ptr;
 #endif
 
-    data_reg = *args++;
-    addr_reg = *args++;
-    s_bits = opc & 3;
-
 #ifdef CONFIG_SOFTMMU
-    mem_index = *args;
-
     addr_reg = tcg_out_tlb_read(s, s_bits, addr_reg, mem_index, true);
 
     /* Load a pointer into the current opcode w/conditional branch-link. */
@@ -1035,7 +1042,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     } else if (insn) {
         tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
     } else {
-        insn = qemu_ldx_opc[s_bits];
+        insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
         tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
         insn = qemu_exts_opc[s_bits];
         tcg_out32(s, insn | RA(data_reg) | RS(data_reg));
@@ -1047,22 +1054,17 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 #endif
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            TCGMemOp opc, int mem_index)
 {
-    TCGReg addr_reg, rbase, data_reg;
+    TCGReg rbase;
     uint32_t insn;
 #ifdef CONFIG_SOFTMMU
-    int mem_index;
     void *label_ptr;
 #endif
 
-    data_reg = *args++;
-    addr_reg = *args++;
-
 #ifdef CONFIG_SOFTMMU
-    mem_index = *args;
-
-    addr_reg = tcg_out_tlb_read(s, opc, addr_reg, mem_index, false);
+    addr_reg = tcg_out_tlb_read(s, opc & MO_SIZE, addr_reg, mem_index, false);
 
     /* Load a pointer into the current opcode w/conditional branch-link. */
     label_ptr = s->code_ptr;
@@ -1826,39 +1828,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
         break;
 
-    case INDEX_op_qemu_ld8u:
-        tcg_out_qemu_ld(s, args, 0);
-        break;
-    case INDEX_op_qemu_ld8s:
-        tcg_out_qemu_ld(s, args, 0 | 4);
-        break;
-    case INDEX_op_qemu_ld16u:
-        tcg_out_qemu_ld(s, args, 1);
-        break;
-    case INDEX_op_qemu_ld16s:
-        tcg_out_qemu_ld(s, args, 1 | 4);
-        break;
-    case INDEX_op_qemu_ld32:
-    case INDEX_op_qemu_ld32u:
-        tcg_out_qemu_ld(s, args, 2);
-        break;
-    case INDEX_op_qemu_ld32s:
-        tcg_out_qemu_ld(s, args, 2 | 4);
-        break;
-    case INDEX_op_qemu_ld64:
-        tcg_out_qemu_ld(s, args, 3);
-        break;
-    case INDEX_op_qemu_st8:
-        tcg_out_qemu_st(s, args, 0);
-        break;
-    case INDEX_op_qemu_st16:
-        tcg_out_qemu_st(s, args, 1);
-        break;
-    case INDEX_op_qemu_st32:
-        tcg_out_qemu_st(s, args, 2);
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld_i64:
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]);
         break;
-    case INDEX_op_qemu_st64:
-        tcg_out_qemu_st(s, args, 3);
+    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st_i64:
+        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
         break;
 
     case INDEX_op_ext8s_i32:
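Also visible above: because ppc64 is a 64-bit host with 64-bit guest addresses, the new ops never carry hi/lo register halves, so tcg_out_op can index the operand array directly (data, addr, memop, mmu index) instead of the pointer-bumping decode the 32-bit ppc backend needs. A sketch of the two decode shapes; types and names here are illustrative, not the real TCG definitions.

typedef long TCGArg;

/* 64-bit host: fixed four-operand layout, direct indexing. */
static void decode_flat(const TCGArg *args,
                        TCGArg *data, TCGArg *addr, TCGArg *opc, TCGArg *mmu)
{
    *data = args[0];
    *addr = args[1];
    *opc  = args[2];
    *mmu  = args[3];
}

/* 32-bit host: optional hi halves make the layout variable, hence the
 * "*args++" style seen in the ppc hunks earlier. */
static void decode_variable(const TCGArg *args, int is64, int addr_is_64,
                            TCGArg *datalo, TCGArg *datahi,
                            TCGArg *addrlo, TCGArg *addrhi, TCGArg *opc)
{
    *datalo = *args++;
    *datahi = is64 ? *args++ : 0;
    *addrlo = *args++;
    *addrhi = addr_is_64 ? *args++ : 0;
    *opc    = *args++;
}

int main(void)
{
    const TCGArg a[4] = { 3, 4, 1, 0 };  /* data, addr, memop, mmu idx */
    TCGArg d, ad, op, mm, dl, dh, al, ah, op2;
    decode_flat(a, &d, &ad, &op, &mm);
    decode_variable(a, 0, 0, &dl, &dh, &al, &ah, &op2);
    return (d == dl && ad == al) ? 0 : 1;
}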
INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S" } }, + { INDEX_op_qemu_st_i64, { "S", "S" } }, { INDEX_op_ext8s_i32, { "r", "r" } }, { INDEX_op_ext16s_i32, { "r", "r" } }, diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h index 457ea69de6..7ee50b6c6c 100644 --- a/tcg/ppc64/tcg-target.h +++ b/tcg/ppc64/tcg-target.h @@ -123,7 +123,7 @@ typedef enum { #define TCG_TARGET_HAS_muluh_i64 1 #define TCG_TARGET_HAS_mulsh_i64 1 -#define TCG_TARGET_HAS_new_ldst 0 +#define TCG_TARGET_HAS_new_ldst 1 #define TCG_AREG0 TCG_REG_R27 diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h index 2826d296d7..284db0c70d 100644 --- a/tcg/tcg-be-ldst.h +++ b/tcg/tcg-be-ldst.h @@ -25,7 +25,7 @@ typedef struct TCGLabelQemuLdst { int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */ - int opc:4; + TCGMemOp opc:4; TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ |