| author | Aurelien Jarno <aurelien@aurel32.net> | 2010-04-09 20:52:48 +0200 |
|---|---|---|
| committer | Aurelien Jarno <aurelien@aurel32.net> | 2010-04-19 07:02:53 +0200 |
| commit | c8d80cef55522ca68618d5aa3153fd4b8f053573 (patch) | |
| tree | fec9d9b1b156ced01c000e16871971fea3453061 /tcg/arm | |
| parent | f694a27ed7281ce29f2b86235f79ef16ff759b7e (diff) | |
tcg/arm: replace integer values by registers enum
The TCG ARM backend uses integer values to refer to both immediate
values and register numbers. This makes the code difficult to read.
The patch below replaces all (if I haven't missed any ;-) integer values
representing register numbers with TCG_REG_* enum values.
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
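
For reference, the TCG_REG_* names that the patch substitutes for bare integers come from the ARM backend's register enumeration in tcg/arm/tcg-target.h. A minimal sketch of that enum (reconstructed here for illustration; the real definition may differ in detail) shows the property the patch relies on: each name carries the ARM register number it replaces.

```c
/* Sketch of the ARM backend's register names (see tcg/arm/tcg-target.h).
 * Enumerators are assigned consecutively from 0, so TCG_REG_R8 == 8,
 * TCG_REG_R14 == 14 (lr) and TCG_REG_PC == 15 (pc). */
enum {
    TCG_REG_R0 = 0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,    /* stack pointer */
    TCG_REG_R14,    /* link register */
    TCG_REG_PC,     /* program counter, r15 */
};
```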
Diffstat (limited to 'tcg/arm')
-rw-r--r-- | tcg/arm/tcg-target.c | 233 |
1 file changed, 124 insertions, 109 deletions
```diff
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index e923330a2f..ee5f7238b9 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -397,7 +397,7 @@ static inline void tcg_out_mul32(TCGContext *s,
         tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                   (rs << 8) | 0x90 | rm);
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        rd, 0, 8, SHIFT_IMM_LSL(0));
+                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
     }
 }
 
@@ -694,12 +694,13 @@ static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
         tcg_abort();
 #else
         if (cond == COND_AL) {
-            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
             tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
         } else {
             tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
             tcg_out_dat_reg(s, cond, ARITH_ADD,
-                            15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
+                            TCG_REG_PC, TCG_REG_PC,
+                            TCG_REG_R8, SHIFT_IMM_LSL(0));
         }
 #endif
 }
@@ -717,12 +718,13 @@ static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
         tcg_abort();
 #else
         if (cond == COND_AL) {
-            tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
-            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
             tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
         } else {
             tcg_out_movi32(s, cond, TCG_REG_R9, addr);
-            tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
+            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
+                            TCG_REG_PC, SHIFT_IMM_LSL(0));
             tcg_out_bx(s, cond, TCG_REG_R9);
         }
 #endif
@@ -732,7 +734,8 @@ static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
 
 static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
 {
     /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
-    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
+    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
+                    TCG_REG_PC, SHIFT_IMM_LSL(0));
     tcg_out_bx(s, cond, arg);
 }
@@ -743,7 +746,7 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
     if (l->has_value)
         tcg_out_goto(s, cond, l->u.value);
     else if (cond == COND_AL) {
-        tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
         tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
         s->code_ptr += 4;
     } else {
@@ -807,12 +810,12 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
 #  if CPU_TLB_BITS > 8
 #   error
 #  endif
-    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
-                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
+                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
     tcg_out_dat_imm(s, COND_AL, ARITH_AND,
-                    0, 8, CPU_TLB_SIZE - 1);
-    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
-                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
+    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
+                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
     /* In the
      * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
      * below, the offset is likely to exceed 12 bits if mem_index != 0 and
@@ -821,13 +824,13 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
      * before.
      */
     if (mem_index)
-        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
+        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                         (mem_index << (TLB_SHIFT & 1)) |
                         ((16 - (TLB_SHIFT >> 1)) << 8));
-    tcg_out_ld32_12(s, COND_AL, 1, 0,
+    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUState, tlb_table[0][0].addr_read));
-    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
-                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
+                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
     /* Check alignment. */
     if (s_bits)
         tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
@@ -835,34 +838,34 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
 #  if TARGET_LONG_BITS == 64
     /* XXX: possibly we could use a block data load or writeback in
      * the first access. */
-    tcg_out_ld32_12(s, COND_EQ, 1, 0,
+    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
-    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
-                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
+    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
+                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
 #  endif
-    tcg_out_ld32_12(s, COND_EQ, 1, 0,
+    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUState, tlb_table[0][0].addend));
 
     switch (opc) {
     case 0:
-        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 0 | 4:
-        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 1:
-        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 1 | 4:
-        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 2:
     default:
-        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 3:
-        tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
-        tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
+        tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
+        tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
         break;
     }
 
@@ -870,16 +873,18 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
     tcg_out_b(s, COND_EQ, 8);
 
     /* TODO: move this code to where the constants pool will be */
-    if (addr_reg)
+    if (addr_reg != TCG_REG_R0) {
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
+                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
+    }
 #  if TARGET_LONG_BITS == 32
-    tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
+    tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R1, 0, mem_index);
 #  else
-    if (addr_reg2 != 1)
+    if (addr_reg2 != TCG_REG_R1) {
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
-    tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+                        TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+    }
+    tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
 #  endif
     tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                     (tcg_target_long) s->code_ptr);
@@ -887,31 +892,34 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
     switch (opc) {
     case 0 | 4:
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        0, 0, 0, SHIFT_IMM_LSL(24));
+                        TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(24));
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        data_reg, 0, 0, SHIFT_IMM_ASR(24));
+                        data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(24));
         break;
     case 1 | 4:
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        0, 0, 0, SHIFT_IMM_LSL(16));
+                        TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(16));
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        data_reg, 0, 0, SHIFT_IMM_ASR(16));
+                        data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(16));
         break;
     case 0:
     case 1:
     case 2:
     default:
-        if (data_reg)
+        if (data_reg != TCG_REG_R0) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
+                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
+        }
         break;
     case 3:
-        if (data_reg != 0)
+        if (data_reg != TCG_REG_R0) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
-        if (data_reg2 != 1)
+                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
+        }
+        if (data_reg2 != TCG_REG_R1) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            data_reg2, 0, 1, SHIFT_IMM_LSL(0));
+                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
+        }
         break;
     }
 
@@ -926,9 +934,9 @@ static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
             i = ctz32(offset) & ~1;
             rot = ((32 - i) << 7) & 0xf00;
-            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
+            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                             ((offset >> i) & 0xff) | rot);
-            addr_reg = 8;
+            addr_reg = TCG_REG_R8;
             offset &= ~(0xff << i);
         }
     }
 
@@ -995,11 +1003,11 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
      *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
      */
     tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
-                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
     tcg_out_dat_imm(s, COND_AL, ARITH_AND,
-                    0, 8, CPU_TLB_SIZE - 1);
-    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
-                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
+    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
+                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
     /* In the
      * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
      * below, the offset is likely to exceed 12 bits if mem_index != 0 and
@@ -1008,13 +1016,13 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
      * before.
      */
     if (mem_index)
-        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
+        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                         (mem_index << (TLB_SHIFT & 1)) |
                         ((16 - (TLB_SHIFT >> 1)) << 8));
-    tcg_out_ld32_12(s, COND_AL, 1, 0,
+    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUState, tlb_table[0][0].addr_write));
-    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
-                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
+                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
     /* Check alignment. */
     if (s_bits)
         tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
@@ -1022,29 +1030,28 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
 #  if TARGET_LONG_BITS == 64
     /* XXX: possibly we could use a block data load or writeback in
      * the first access. */
-    tcg_out_ld32_12(s, COND_EQ, 1, 0,
-                    offsetof(CPUState, tlb_table[0][0].addr_write)
-                    + 4);
-    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
-                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
+    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
+                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
+    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
+                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
 #  endif
-    tcg_out_ld32_12(s, COND_EQ, 1, 0,
+    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                     offsetof(CPUState, tlb_table[0][0].addend));
 
     switch (opc) {
     case 0:
-        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 1:
-        tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 2:
     default:
-        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
+        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
         break;
     case 3:
-        tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
-        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
+        tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
+        tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
         break;
     }
 
@@ -1052,69 +1059,77 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
     tcg_out_b(s, COND_EQ, 8);
 
     /* TODO: move this code to where the constants pool will be */
-    if (addr_reg)
+    if (addr_reg != TCG_REG_R0) {
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
+                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
+    }
 #  if TARGET_LONG_BITS == 32
     switch (opc) {
     case 0:
-        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R1, data_reg, 0xff);
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
         break;
     case 1:
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        1, 0, data_reg, SHIFT_IMM_LSL(16));
+                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(16));
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        1, 0, 1, SHIFT_IMM_LSR(16));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+                        TCG_REG_R1, 0, TCG_REG_R1, SHIFT_IMM_LSR(16));
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
         break;
     case 2:
-        if (data_reg != 1)
+        if (data_reg != TCG_REG_R1) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            1, 0, data_reg, SHIFT_IMM_LSL(0));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+                            TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
+        }
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
         break;
     case 3:
-        if (data_reg != 1)
+        if (data_reg != TCG_REG_R1) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            1, 0, data_reg, SHIFT_IMM_LSL(0));
-        if (data_reg2 != 2)
+                            TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
+        }
+        if (data_reg2 != TCG_REG_R2) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            2, 0, data_reg2, SHIFT_IMM_LSL(0));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+                            TCG_REG_R2, 0, data_reg2, SHIFT_IMM_LSL(0));
+        }
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
     }
 #  else
-    if (addr_reg2 != 1)
+    if (addr_reg2 != TCG_REG_R1) {
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+                        TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+    }
     switch (opc) {
     case 0:
-        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R2, data_reg, 0xff);
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
     case 1:
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        2, 0, data_reg, SHIFT_IMM_LSL(16));
+                        TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(16));
         tcg_out_dat_reg(s, cond, ARITH_MOV,
-                        2, 0, 2, SHIFT_IMM_LSR(16));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+                        TCG_REG_R2, 0, TCG_REG_R2, SHIFT_IMM_LSR(16));
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
         break;
     case 2:
-        if (data_reg != 2)
+        if (data_reg != TCG_REG_R2) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            2, 0, data_reg, SHIFT_IMM_LSL(0));
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
+        }
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
         break;
     case 3:
-        tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
+        tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R8, 0, mem_index);
         tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
-        if (data_reg != 2)
+        if (data_reg != TCG_REG_R2) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            2, 0, data_reg, SHIFT_IMM_LSL(0));
-        if (data_reg2 != 3)
+                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
+        }
+        if (data_reg2 != TCG_REG_R3) {
             tcg_out_dat_reg(s, cond, ARITH_MOV,
-                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
+                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
+        }
         break;
     }
 #  endif
@@ -1123,7 +1138,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                     (tcg_target_long) s->code_ptr);
 #  if TARGET_LONG_BITS == 64
     if (opc == 3)
-        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
+        tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
 #  endif
 
     *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
@@ -1137,9 +1152,9 @@ static inline void tcg_out_qemu_st(TCGContext *s, int cond,
             i = ctz32(offset) & ~1;
             rot = ((32 - i) << 7) & 0xf00;
-            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
+            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                             ((offset >> i) & 0xff) | rot);
-            addr_reg = 8;
+            addr_reg = TCG_REG_R8;
             offset &= ~(0xff << i);
         }
     }
 
@@ -1176,9 +1191,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         {
             uint8_t *ld_ptr = s->code_ptr;
             if (args[0] >> 8)
-                tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
+                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
             else
-                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
+                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
             tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
             if (args[0] >> 8) {
                 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
@@ -1193,7 +1208,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
             tcg_out_b(s, COND_AL, 8);
 #else
-            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
             s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
             tcg_out32(s, 0);
 #endif
@@ -1204,12 +1219,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             if (c > 0xfff || c < -0xfff) {
                 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
-                tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
+                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
             } else
-                tcg_out_ld32_12(s, COND_AL, 15, 15, c);
+                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
 #else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
-            tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
             tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
 #endif
         }
@@ -1335,8 +1350,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (const_args[1]) {
             int rot;
             rot = encode_imm(args[1]);
-            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
-                            0, args[0], rotl(args[1], rot) | (rot << 7));
+            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
+                            args[0], rotl(args[1], rot) | (rot << 7));
         } else {
             tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                             args[0], args[1], SHIFT_IMM_LSL(0));
@@ -1362,8 +1377,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (const_args[2]) {
             int rot;
             rot = encode_imm(args[2]);
-            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
-                            0, args[1], rotl(args[2], rot) | (rot << 7));
+            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
+                            args[1], rotl(args[2], rot) | (rot << 7));
         } else {
             tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                             args[1], args[2], SHIFT_IMM_LSL(0));
```
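With one exception in tcg_out_call, where a tcg_out_dat_imm call becomes tcg_out_dat_reg, the patch is a pure rename: the enumerators equal the integers they replace, so the emitted instructions are unchanged. A small self-contained demonstration of that point (illustrative code, not part of QEMU; encode_mov_reg is a hypothetical helper modelling the ARM MOV-register encoding):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the backend's register names. */
enum {
    TCG_REG_R14 = 14,   /* lr */
    TCG_REG_PC  = 15,   /* pc */
};

/* Simplified ARM "MOV rd, rm" encoding (condition AL): the register
 * numbers land in 4-bit fields, so a named enumerator and the bare
 * integer it replaces yield the same instruction word. */
static uint32_t encode_mov_reg(unsigned rd, unsigned rm)
{
    return 0xe1a00000u | (rd << 12) | rm;
}

int main(void)
{
    /* The "mov lr, pc" emitted by tcg_out_callr: 14/15 vs. named form. */
    assert(encode_mov_reg(14, 15) == encode_mov_reg(TCG_REG_R14, TCG_REG_PC));
    printf("mov lr, pc -> 0x%08x\n", encode_mov_reg(TCG_REG_R14, TCG_REG_PC));
    return 0;
}
```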