-rw-r--r--   tcg/aarch64/tcg-target.h |   2
-rw-r--r--   tcg/arm/tcg-target.h     |   2
-rw-r--r--   tcg/i386/tcg-target.c    |   3
-rw-r--r--   tcg/i386/tcg-target.h    |   2
-rw-r--r--   tcg/ia64/tcg-target.h    |   2
-rw-r--r--   tcg/mips/tcg-target.h    |   2
-rw-r--r--   tcg/optimize.c           |   5
-rw-r--r--   tcg/ppc/tcg-target.h     |   2
-rw-r--r--   tcg/ppc64/tcg-target.h   |   2
-rw-r--r--   tcg/s390/tcg-target.h    |   2
-rw-r--r--   tcg/sparc/tcg-target.h   |   2
-rw-r--r--   tcg/tcg-op.h             |   2
-rw-r--r--   tcg/tcg-opc.h            | 114
-rw-r--r--   tcg/tcg.c                | 123
-rw-r--r--   tcg/tci/tcg-target.c     |  76
-rw-r--r--   tcg/tci/tcg-target.h     |   2
-rw-r--r--   tci.c                    | 322
17 files changed, 237 insertions, 428 deletions
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index a32aea66fa..60c7493ac1 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -99,8 +99,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
 {
     __builtin___clear_cache((char *)start, (char *)stop);
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 73f10c4424..1c719e2862 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -86,8 +86,6 @@ extern bool use_idiv_instructions;
 #define TCG_TARGET_HAS_div_i32 use_idiv_instructions
 #define TCG_TARGET_HAS_rem_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 extern bool tcg_target_deposit_valid(int ofs, int len);
 #define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index d9102335f9..4133dcf1a9 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1407,7 +1407,8 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     } else {
         retaddr = TCG_REG_RAX;
         tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
-        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);
+        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
+                   TCG_TARGET_CALL_STACK_OFFSET);
     }
 }
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 6c94e5cde0..7a9980e70e 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -130,8 +130,6 @@ extern bool have_bmi1;
 #define TCG_TARGET_HAS_mulsh_i64 0
 #endif
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_TARGET_deposit_i32_valid(ofs, len) \
     (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
      ((ofs) == 0 && (len) == 16))
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 3a59b50349..d67558988a 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -160,8 +160,6 @@ typedef enum {
 #define TCG_TARGET_HAS_mulsh_i64 0
 #define TCG_TARGET_HAS_trunc_shr_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
 #define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index b5face8b4d..c88a1c9272 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -118,8 +118,6 @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
 #define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 /* optional instructions automatically implemented */
 #define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
 #define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 77da2f942a..16cebbe16d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -889,17 +889,12 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             break;
         CASE_OP_32_64(ld8u):
-        case INDEX_op_qemu_ld8u:
             mask = 0xff;
             break;
         CASE_OP_32_64(ld16u):
-        case INDEX_op_qemu_ld16u:
             mask = 0xffff;
             break;
         case INDEX_op_ld32u_i64:
-#if TCG_TARGET_REG_BITS == 64
-        case INDEX_op_qemu_ld32u:
-#endif
             mask = 0xffffffffu;
             break;
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index dd7e557948..05069ae2d8 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -100,8 +100,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_AREG0 TCG_REG_R27
 
 #define tcg_qemu_tb_exec(env, tb_ptr) \
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
index 29f479a3cf..f2360c869a 100644
--- a/tcg/ppc64/tcg-target.h
+++ b/tcg/ppc64/tcg-target.h
@@ -124,8 +124,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_AREG0 TCG_REG_R27
 
 #define TCG_TARGET_EXTEND_ARGS 1
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index ad2c6ddaf4..5acc28ca6b 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -100,8 +100,6 @@ typedef enum TCGReg {
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i64 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 extern bool tcg_target_deposit_valid(int ofs, int len);
 #define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
 #define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index 473bfc71fd..089f9761ca 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -140,8 +140,6 @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64 0
 #define TCG_TARGET_HAS_mulsh_i64 0
 
-#define TCG_TARGET_HAS_new_ldst 1
-
 #define TCG_AREG0 TCG_REG_I0
 
 static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 719533ac39..019dd9b6fb 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -2718,7 +2718,7 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
 #define tcg_gen_bswap16_tl tcg_gen_bswap16_i32
 #define tcg_gen_bswap32_tl tcg_gen_bswap32_i32
 #define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64
-#define tcg_gen_extr_tl_i64 tcg_gen_extr_i32_i64
+#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32
 #define tcg_gen_andc_tl tcg_gen_andc_i32
 #define tcg_gen_eqv_tl tcg_gen_eqv_i32
 #define tcg_gen_nand_tl tcg_gen_nand_i32
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 71ba64ac16..042d442c7e 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -185,106 +185,20 @@ DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
 DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
 
-#define IMPL_NEW_LDST \
-    (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
-     | IMPL(TCG_TARGET_HAS_new_ldst))
-
-#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-DEF(qemu_ld_i32, 1, 1, 2, IMPL_NEW_LDST)
-DEF(qemu_st_i32, 0, 2, 2, IMPL_NEW_LDST)
-# if TCG_TARGET_REG_BITS == 64
-DEF(qemu_ld_i64, 1, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-# else
-DEF(qemu_ld_i64, 2, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, 3, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-# endif
-#else
-DEF(qemu_ld_i32, 1, 2, 2, IMPL_NEW_LDST)
-DEF(qemu_st_i32, 0, 3, 2, IMPL_NEW_LDST)
-DEF(qemu_ld_i64, 2, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, 4, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
-#endif
-
-#undef IMPL_NEW_LDST
-
-#define IMPL_OLD_LDST \
-    (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
-     | IMPL(!TCG_TARGET_HAS_new_ldst))
-
-#if TCG_TARGET_REG_BITS == 32
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld8u, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld8s, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld16u, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld16s, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_ld32, 1, 2, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_ld64, 2, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#else
-DEF(qemu_ld64, 2, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#endif
-
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_st8, 0, 3, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_st16, 0, 3, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST)
-#else
-DEF(qemu_st32, 0, 3, 1, IMPL_OLD_LDST)
-#endif
-#if TARGET_LONG_BITS == 32
-DEF(qemu_st64, 0, 3, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#else
-DEF(qemu_st64, 0, 4, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-#endif
-
-#else /* TCG_TARGET_REG_BITS == 32 */
-
-DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld32u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld32s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_ld64, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-
-DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-DEF(qemu_st64, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
-
-#endif /* TCG_TARGET_REG_BITS != 32 */
-
-#undef IMPL_OLD_LDST
-
+#define TLADDR_ARGS (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2)
+#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
+
+DEF(qemu_ld_i32, 1, TLADDR_ARGS, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 2,
+    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
+
+#undef TLADDR_ARGS
+#undef DATA64_ARGS
 #undef IMPL
 #undef IMPL64
 #undef DEF
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -941,97 +941,26 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
     return op;
 }
 
-static const TCGOpcode old_ld_opc[8] = {
-    [MO_UB] = INDEX_op_qemu_ld8u,
-    [MO_SB] = INDEX_op_qemu_ld8s,
-    [MO_UW] = INDEX_op_qemu_ld16u,
-    [MO_SW] = INDEX_op_qemu_ld16s,
-#if TCG_TARGET_REG_BITS == 32
-    [MO_UL] = INDEX_op_qemu_ld32,
-    [MO_SL] = INDEX_op_qemu_ld32,
-#else
-    [MO_UL] = INDEX_op_qemu_ld32u,
-    [MO_SL] = INDEX_op_qemu_ld32s,
-#endif
-    [MO_Q] = INDEX_op_qemu_ld64,
-};
-
-static const TCGOpcode old_st_opc[4] = {
-    [MO_UB] = INDEX_op_qemu_st8,
-    [MO_UW] = INDEX_op_qemu_st16,
-    [MO_UL] = INDEX_op_qemu_st32,
-    [MO_Q] = INDEX_op_qemu_st64,
-};
-
 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
     memop = tcg_canonicalize_memop(memop, 0, 0);
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations.  */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_ld_opc[memop & MO_SSIZE] != 0);
-
-    if (TCG_TARGET_REG_BITS == 32) {
-        *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-    } else {
-        TCGv_i64 val64 = tcg_temp_new_i64();
-
-        *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
-        tcg_add_param_i64(val64);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-
-        tcg_gen_trunc_i64_i32(val, val64);
-        tcg_temp_free_i64(val64);
-    }
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
+    tcg_add_param_i32(val);
+    tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
+    *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
     memop = tcg_canonicalize_memop(memop, 0, 1);
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations.  */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_st_opc[memop & MO_SIZE] != 0);
-
-    if (TCG_TARGET_REG_BITS == 32) {
-        *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
-        tcg_add_param_i32(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-    } else {
-        TCGv_i64 val64 = tcg_temp_new_i64();
-
-        tcg_gen_extu_i32_i64(val64, val);
-
-        *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
-        tcg_add_param_i64(val64);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-
-        tcg_temp_free_i64(val64);
-    }
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
+    tcg_add_param_i32(val);
+    tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
+    *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
@@ -1050,22 +979,10 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
     }
 #endif
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
-        tcg_add_param_i64(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations.  */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_ld_opc[memop & MO_SSIZE] != 0);
-
-    *tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
     tcg_add_param_i64(val);
     tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
     *tcg_ctx.gen_opparam_ptr++ = idx;
 }
 
@@ -1080,22 +997,10 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
     }
 #endif
 
-    if (TCG_TARGET_HAS_new_ldst) {
-        *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
-        tcg_add_param_i64(val);
-        tcg_add_param_tl(addr);
-        *tcg_ctx.gen_opparam_ptr++ = memop;
-        *tcg_ctx.gen_opparam_ptr++ = idx;
-        return;
-    }
-
-    /* The old opcodes only support target-endian memory operations.  */
-    assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
-    assert(old_st_opc[memop & MO_SIZE] != 0);
-
-    *tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
+    *tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
     tcg_add_param_i64(val);
     tcg_add_param_tl(addr);
+    *tcg_ctx.gen_opparam_ptr++ = memop;
     *tcg_ctx.gen_opparam_ptr++ = idx;
 }
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index 375e590d2b..03a7b46958 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -227,21 +227,11 @@ static const TCGTargetOpDef tcg_target_op_defs[] = {
 #endif
 #endif /* TCG_TARGET_REG_BITS == 64 */
 
-    { INDEX_op_qemu_ld8u, { R, L } },
-    { INDEX_op_qemu_ld8s, { R, L } },
-    { INDEX_op_qemu_ld16u, { R, L } },
-    { INDEX_op_qemu_ld16s, { R, L } },
-    { INDEX_op_qemu_ld32, { R, L } },
-#if TCG_TARGET_REG_BITS == 64
-    { INDEX_op_qemu_ld32u, { R, L } },
-    { INDEX_op_qemu_ld32s, { R, L } },
-#endif
-    { INDEX_op_qemu_ld64, { R64, L } },
+    { INDEX_op_qemu_ld_i32, { R, L } },
+    { INDEX_op_qemu_ld_i64, { R64, L } },
 
-    { INDEX_op_qemu_st8, { R, S } },
-    { INDEX_op_qemu_st16, { R, S } },
-    { INDEX_op_qemu_st32, { R, S } },
-    { INDEX_op_qemu_st64, { R64, S } },
+    { INDEX_op_qemu_st_i32, { R, S } },
+    { INDEX_op_qemu_st_i64, { R64, S } },
 
 #if TCG_TARGET_HAS_ext8s_i32
     { INDEX_op_ext8s_i32, { R, R } },
@@ -767,58 +757,52 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out8(s, args[2]); /* condition */
         tci_out_label(s, args[3]);
         break;
-    case INDEX_op_qemu_ld8u:
-    case INDEX_op_qemu_ld8s:
-    case INDEX_op_qemu_ld16u:
-    case INDEX_op_qemu_ld16s:
-    case INDEX_op_qemu_ld32:
-#if TCG_TARGET_REG_BITS == 64
-    case INDEX_op_qemu_ld32s:
-    case INDEX_op_qemu_ld32u:
-#endif
+    case INDEX_op_qemu_ld_i32:
         tcg_out_r(s, *args++);
         tcg_out_r(s, *args++);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-        tcg_out_r(s, *args++);
-#endif
+        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+            tcg_out_r(s, *args++);
+        }
+        tcg_out_i(s, *args++);
 #ifdef CONFIG_SOFTMMU
         tcg_out_i(s, *args);
 #endif
         break;
-    case INDEX_op_qemu_ld64:
-        tcg_out_r(s, *args++);
-#if TCG_TARGET_REG_BITS == 32
+    case INDEX_op_qemu_ld_i64:
         tcg_out_r(s, *args++);
-#endif
-        tcg_out_r(s, *args++);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+        if (TCG_TARGET_REG_BITS == 32) {
+            tcg_out_r(s, *args++);
+        }
         tcg_out_r(s, *args++);
-#endif
+        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+            tcg_out_r(s, *args++);
+        }
+        tcg_out_i(s, *args++);
 #ifdef CONFIG_SOFTMMU
         tcg_out_i(s, *args);
 #endif
         break;
-    case INDEX_op_qemu_st8:
-    case INDEX_op_qemu_st16:
-    case INDEX_op_qemu_st32:
+    case INDEX_op_qemu_st_i32:
         tcg_out_r(s, *args++);
         tcg_out_r(s, *args++);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-        tcg_out_r(s, *args++);
-#endif
+        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+            tcg_out_r(s, *args++);
+        }
+        tcg_out_i(s, *args++);
 #ifdef CONFIG_SOFTMMU
         tcg_out_i(s, *args);
 #endif
         break;
-    case INDEX_op_qemu_st64:
-        tcg_out_r(s, *args++);
-#if TCG_TARGET_REG_BITS == 32
-        tcg_out_r(s, *args++);
-#endif
+    case INDEX_op_qemu_st_i64:
         tcg_out_r(s, *args++);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+        if (TCG_TARGET_REG_BITS == 32) {
+            tcg_out_r(s, *args++);
+        }
         tcg_out_r(s, *args++);
-#endif
+        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
+            tcg_out_r(s, *args++);
+        }
+        tcg_out_i(s, *args++);
 #ifdef CONFIG_SOFTMMU
         tcg_out_i(s, *args);
 #endif
         break;
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 0be5acdb81..bd1e97468c 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -118,8 +118,6 @@
 #define TCG_TARGET_HAS_mulu2_i32 1
 #endif /* TCG_TARGET_REG_BITS == 64 */
 
-#define TCG_TARGET_HAS_new_ldst 0
-
 /* Number of registers available.
    For 32 bit hosts, we need more than 8 registers (call arguments). */
 /* #define TCG_TARGET_NB_REGS 8 */
diff --git a/tci.c b/tci.c
--- a/tci.c
+++ b/tci.c
@@ -116,16 +116,6 @@ static void tci_write_reg(TCGReg index, tcg_target_ulong value)
     tci_reg[index] = value;
 }
 
-static void tci_write_reg8s(TCGReg index, int8_t value)
-{
-    tci_write_reg(index, value);
-}
-
-static void tci_write_reg16s(TCGReg index, int16_t value)
-{
-    tci_write_reg(index, value);
-}
-
 #if TCG_TARGET_REG_BITS == 64
 static void tci_write_reg32s(TCGReg index, int32_t value)
 {
@@ -138,11 +128,6 @@ static void tci_write_reg8(TCGReg index, uint8_t value)
     tci_write_reg(index, value);
 }
 
-static void tci_write_reg16(TCGReg index, uint16_t value)
-{
-    tci_write_reg(index, value);
-}
-
 static void tci_write_reg32(TCGReg index, uint32_t value)
 {
     tci_write_reg(index, value);
@@ -433,6 +418,53 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
     return result;
 }
 
+#ifdef CONFIG_SOFTMMU
+# define mmuidx tci_read_i(&tb_ptr)
+# define qemu_ld_ub \
+    helper_ret_ldub_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_ld_leuw \
+    helper_le_lduw_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_ld_leul \
+    helper_le_ldul_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_ld_leq \
+    helper_le_ldq_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_ld_beuw \
+    helper_be_lduw_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_ld_beul \
+    helper_be_ldul_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_ld_beq \
+    helper_be_ldq_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_b(X) \
+    helper_ret_stb_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_lew(X) \
+    helper_le_stw_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_lel(X) \
+    helper_le_stl_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_leq(X) \
+    helper_le_stq_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_bew(X) \
+    helper_be_stw_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_bel(X) \
+    helper_be_stl_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+# define qemu_st_beq(X) \
+    helper_be_stq_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+#else
+# define qemu_ld_ub ldub_p(g2h(taddr))
+# define qemu_ld_leuw lduw_le_p(g2h(taddr))
+# define qemu_ld_leul (uint32_t)ldl_le_p(g2h(taddr))
+# define qemu_ld_leq ldq_le_p(g2h(taddr))
+# define qemu_ld_beuw lduw_be_p(g2h(taddr))
+# define qemu_ld_beul (uint32_t)ldl_be_p(g2h(taddr))
+# define qemu_ld_beq ldq_be_p(g2h(taddr))
+# define qemu_st_b(X) stb_p(g2h(taddr), X)
+# define qemu_st_lew(X) stw_le_p(g2h(taddr), X)
+# define qemu_st_lel(X) stl_le_p(g2h(taddr), X)
+# define qemu_st_leq(X) stq_le_p(g2h(taddr), X)
+# define qemu_st_bew(X) stw_be_p(g2h(taddr), X)
+# define qemu_st_bel(X) stl_be_p(g2h(taddr), X)
+# define qemu_st_beq(X) stq_be_p(g2h(taddr), X)
+#endif
+
 /* Interpret pseudo code in tb. */
 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 {
@@ -456,9 +488,6 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
     tcg_target_ulong label;
     TCGCond condition;
     target_ulong taddr;
-#ifndef CONFIG_SOFTMMU
-    tcg_target_ulong host_addr;
-#endif
     uint8_t tmp8;
     uint16_t tmp16;
     uint32_t tmp32;
@@ -466,6 +495,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 #if TCG_TARGET_REG_BITS == 32
     uint64_t v64;
 #endif
+    TCGMemOp memop;
 
 #if defined(GETPC)
     tci_tb_ptr = (uintptr_t)tb_ptr;
@@ -1086,145 +1116,145 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             assert(tb_ptr == old_code_ptr + op_size);
             tb_ptr += (int32_t)t0;
             continue;
-        case INDEX_op_qemu_ld8u:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp8 = helper_ldb_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp8 = *(uint8_t *)(host_addr + GUEST_BASE);
-#endif
-            tci_write_reg8(t0, tmp8);
-            break;
-        case INDEX_op_qemu_ld8s:
+        case INDEX_op_qemu_ld_i32:
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp8 = helper_ldb_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp8 = *(uint8_t *)(host_addr + GUEST_BASE);
-#endif
-            tci_write_reg8s(t0, tmp8);
-            break;
-        case INDEX_op_qemu_ld16u:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp16 = helper_ldw_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE));
-#endif
-            tci_write_reg16(t0, tmp16);
-            break;
-        case INDEX_op_qemu_ld16s:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp16 = helper_ldw_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE));
-#endif
-            tci_write_reg16s(t0, tmp16);
-            break;
-#if TCG_TARGET_REG_BITS == 64
-        case INDEX_op_qemu_ld32u:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE));
-#endif
-            tci_write_reg32(t0, tmp32);
-            break;
-        case INDEX_op_qemu_ld32s:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE));
-#endif
-            tci_write_reg32s(t0, tmp32);
-            break;
-#endif /* TCG_TARGET_REG_BITS == 64 */
-        case INDEX_op_qemu_ld32:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE));
-#endif
-            tci_write_reg32(t0, tmp32);
+            memop = tci_read_i(&tb_ptr);
+            switch (memop) {
+            case MO_UB:
+                tmp32 = qemu_ld_ub;
+                break;
+            case MO_SB:
+                tmp32 = (int8_t)qemu_ld_ub;
+                break;
+            case MO_LEUW:
+                tmp32 = qemu_ld_leuw;
+                break;
+            case MO_LESW:
+                tmp32 = (int16_t)qemu_ld_leuw;
+                break;
+            case MO_LEUL:
+                tmp32 = qemu_ld_leul;
+                break;
+            case MO_BEUW:
+                tmp32 = qemu_ld_beuw;
+                break;
+            case MO_BESW:
+                tmp32 = (int16_t)qemu_ld_beuw;
+                break;
+            case MO_BEUL:
+                tmp32 = qemu_ld_beul;
+                break;
+            default:
+                tcg_abort();
+            }
+            tci_write_reg(t0, tmp32);
             break;
-        case INDEX_op_qemu_ld64:
+        case INDEX_op_qemu_ld_i64:
             t0 = *tb_ptr++;
-#if TCG_TARGET_REG_BITS == 32
-            t1 = *tb_ptr++;
-#endif
+            if (TCG_TARGET_REG_BITS == 32) {
+                t1 = *tb_ptr++;
+            }
             taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            tmp64 = helper_ldq_mmu(env, taddr, tci_read_i(&tb_ptr));
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            tmp64 = tswap64(*(uint64_t *)(host_addr + GUEST_BASE));
-#endif
+            memop = tci_read_i(&tb_ptr);
+            switch (memop) {
+            case MO_UB:
+                tmp64 = qemu_ld_ub;
+                break;
+            case MO_SB:
+                tmp64 = (int8_t)qemu_ld_ub;
+                break;
+            case MO_LEUW:
+                tmp64 = qemu_ld_leuw;
+                break;
+            case MO_LESW:
+                tmp64 = (int16_t)qemu_ld_leuw;
+                break;
+            case MO_LEUL:
+                tmp64 = qemu_ld_leul;
+                break;
+            case MO_LESL:
+                tmp64 = (int32_t)qemu_ld_leul;
+                break;
+            case MO_LEQ:
+                tmp64 = qemu_ld_leq;
+                break;
+            case MO_BEUW:
+                tmp64 = qemu_ld_beuw;
+                break;
+            case MO_BESW:
+                tmp64 = (int16_t)qemu_ld_beuw;
+                break;
+            case MO_BEUL:
+                tmp64 = qemu_ld_beul;
+                break;
+            case MO_BESL:
+                tmp64 = (int32_t)qemu_ld_beul;
+                break;
+            case MO_BEQ:
+                tmp64 = qemu_ld_beq;
+                break;
+            default:
+                tcg_abort();
+            }
             tci_write_reg(t0, tmp64);
-#if TCG_TARGET_REG_BITS == 32
-            tci_write_reg(t1, tmp64 >> 32);
-#endif
-            break;
-        case INDEX_op_qemu_st8:
-            t0 = tci_read_r8(&tb_ptr);
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            t2 = tci_read_i(&tb_ptr);
-            helper_stb_mmu(env, taddr, t0, t2);
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            *(uint8_t *)(host_addr + GUEST_BASE) = t0;
-#endif
-            break;
-        case INDEX_op_qemu_st16:
-            t0 = tci_read_r16(&tb_ptr);
-            taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            t2 = tci_read_i(&tb_ptr);
-            helper_stw_mmu(env, taddr, t0, t2);
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            *(uint16_t *)(host_addr + GUEST_BASE) = tswap16(t0);
-#endif
+            if (TCG_TARGET_REG_BITS == 32) {
+                tci_write_reg(t1, tmp64 >> 32);
+            }
             break;
-        case INDEX_op_qemu_st32:
-            t0 = tci_read_r32(&tb_ptr);
+        case INDEX_op_qemu_st_i32:
+            t0 = tci_read_r(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            t2 = tci_read_i(&tb_ptr);
-            helper_stl_mmu(env, taddr, t0, t2);
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            *(uint32_t *)(host_addr + GUEST_BASE) = tswap32(t0);
-#endif
+            memop = tci_read_i(&tb_ptr);
+            switch (memop) {
+            case MO_UB:
+                qemu_st_b(t0);
+                break;
+            case MO_LEUW:
+                qemu_st_lew(t0);
+                break;
+            case MO_LEUL:
+                qemu_st_lel(t0);
+                break;
+            case MO_BEUW:
+                qemu_st_bew(t0);
+                break;
+            case MO_BEUL:
+                qemu_st_bel(t0);
+                break;
+            default:
+                tcg_abort();
+            }
             break;
-        case INDEX_op_qemu_st64:
+        case INDEX_op_qemu_st_i64:
             tmp64 = tci_read_r64(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
-#ifdef CONFIG_SOFTMMU
-            t2 = tci_read_i(&tb_ptr);
-            helper_stq_mmu(env, taddr, tmp64, t2);
-#else
-            host_addr = (tcg_target_ulong)taddr;
-            *(uint64_t *)(host_addr + GUEST_BASE) = tswap64(tmp64);
-#endif
+            memop = tci_read_i(&tb_ptr);
+            switch (memop) {
+            case MO_UB:
+                qemu_st_b(tmp64);
+                break;
+            case MO_LEUW:
+                qemu_st_lew(tmp64);
+                break;
+            case MO_LEUL:
+                qemu_st_lel(tmp64);
+                break;
+            case MO_LEQ:
+                qemu_st_leq(tmp64);
+                break;
+            case MO_BEUW:
+                qemu_st_bew(tmp64);
+                break;
+            case MO_BEUL:
+                qemu_st_bel(tmp64);
+                break;
+            case MO_BEQ:
+                qemu_st_beq(tmp64);
+                break;
+            default:
+                tcg_abort();
+            }
             break;
         default:
             TODO();
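
As a reading aid for the tcg-opc.h hunk above: the standalone C snippet below is not part of the patch; the helper functions, the combination table, and main() are purely illustrative. It mirrors the TLADDR_ARGS and DATA64_ARGS macros to show how many register operands the unified qemu_ld/st opcodes take for each combination of guest address width and host register width. In addition to these register operands, each opcode carries two constant operands, the TCGMemOp and the mem_index, which is what lets a single opcode replace the old per-size, per-signedness, per-endianness variants.

/* Illustrative sketch only -- not QEMU source. */
#include <stdio.h>

/* Mirrors TLADDR_ARGS from the patch: a guest address needs one host
 * register when it fits, otherwise a low/high pair. */
static int tladdr_args(int target_long_bits, int tcg_target_reg_bits)
{
    return target_long_bits <= tcg_target_reg_bits ? 1 : 2;
}

/* Mirrors DATA64_ARGS: a 64-bit data value needs two registers on a
 * 32-bit host. */
static int data64_args(int tcg_target_reg_bits)
{
    return tcg_target_reg_bits == 64 ? 1 : 2;
}

int main(void)
{
    static const int combos[][2] = {{32, 32}, {64, 32}, {32, 64}, {64, 64}};
    unsigned i;

    for (i = 0; i < sizeof(combos) / sizeof(combos[0]); i++) {
        int tl = combos[i][0], reg = combos[i][1];
        printf("TARGET_LONG_BITS=%d TCG_TARGET_REG_BITS=%d: "
               "qemu_ld_i32 inputs=%d, qemu_st_i64 inputs=%d\n",
               tl, reg,
               tladdr_args(tl, reg),                     /* address only */
               tladdr_args(tl, reg) + data64_args(reg)); /* data + address */
    }
    return 0;
}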