Diffstat (limited to 'tcg/i386')
-rw-r--r--  tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 909eecd4a3..78160f453b 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1810,8 +1810,6 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                     int mem_index, MemOp opc,
                                     tcg_insn_unit **label_ptr, int which)
 {
-    const TCGReg r0 = TCG_REG_L0;
-    const TCGReg r1 = TCG_REG_L1;
     TCGType ttype = TCG_TYPE_I32;
     TCGType tlbtype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0, tlbrexw = 0;
@@ -1835,15 +1833,15 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
         }
     }
 
-    tcg_out_mov(s, tlbtype, r0, addrlo);
-    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
+    tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
-    tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
+    tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
                          TLB_MASK_TABLE_OFS(mem_index) +
                          offsetof(CPUTLBDescFast, mask));
 
-    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
                          TLB_MASK_TABLE_OFS(mem_index) +
                          offsetof(CPUTLBDescFast, table));
 
@@ -1851,19 +1849,21 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
        copy the address and mask.  For lesser alignments, check that we don't
        cross pages for the complete access. */
     if (a_bits >= s_bits) {
-        tcg_out_mov(s, ttype, r1, addrlo);
+        tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
     } else {
-        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
+                             addrlo, s_mask - a_mask);
     }
     tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
-    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
+    tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
 
-    /* cmp 0(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);
+    /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
+    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
+                         TCG_REG_L1, TCG_REG_L0, which);
 
     /* Prepare for both the fast path add of the tlb addend, and
        the slow path function argument setup. */
-    tcg_out_mov(s, ttype, r1, addrlo);
+    tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
 
     /* jne slow_path */
     tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1871,8 +1871,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     s->code_ptr += 4;
 
     if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
-        /* cmp 4(r0), addrhi */
-        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);
+        /* cmp 4(TCG_REG_L0), addrhi */
+        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, which + 4);
 
         /* jne slow_path */
         tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1882,8 +1882,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 
     /* TLB Hit. */
 
-    /* add addend(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
+    /* add addend(TCG_REG_L0), TCG_REG_L1 */
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L1, TCG_REG_L0,
                          offsetof(CPUTLBEntry, addend));
 }
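
The change is purely mechanical: the local aliases r0/r1 for TCG_REG_L0/TCG_REG_L1
are dropped in favor of the register constants themselves, so the emitted code is
unchanged. For orientation, the fast path this function generates looks roughly
like the sketch below, in AT&T-style pseudo-assembly. This is illustrative, not
taken from the patch: %L0/%L1 stand for whatever host registers back
TCG_REG_L0/TCG_REG_L1, %env for TCG_AREG0, and mask_ofs/table_ofs/which/addend
are symbolic offsets.

    mov    addrlo, %L0                    # copy guest virtual address
    shr    $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), %L0
    and    mask_ofs(%env), %L0            # CPUTLBDescFast.mask: scaled TLB index
    add    table_ofs(%env), %L0           # %L0 = &fast->table[index]
    lea    s_mask-a_mask(addrlo), %L1     # or: mov addrlo, %L1  when a_bits >= s_bits
    and    $(TARGET_PAGE_MASK | a_mask), %L1
    cmp    which(%L0), %L1                # compare with the cached page comparator
    mov    addrlo, %L1                    # reload address for fast and slow paths
    jne    slow_path
    add    addend(%L0), %L1               # TLB hit: %L1 = host address

When TARGET_LONG_BITS > TCG_TARGET_REG_BITS (64-bit guest on a 32-bit host), a
second "cmp 4(%L0), addrhi" / "jne slow_path" pair checks the high half of the
address before the final add.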