author:    Richard Henderson <richard.henderson@linaro.org>  2021-01-27 19:35:07 -1000
committer: Richard Henderson <richard.henderson@linaro.org>  2021-02-05 10:24:14 -1000
commit:    5410e4347b7914aa1f2aa29120cd976a9acd5b1d
tree:      af72efa4a12b181e0271a2281bffda4a5b465e58
parent:    85bbbf7088dae27a110d6669801694af127c5158
tcg/tci: Inline tci_write_reg64 into 64-bit callers
Note that we had two functions of the same name: a 32-bit version
which took two register numbers, and a 64-bit version which was a
no-op wrapper for tci_write_reg. After this patch, only the 32-bit
version remains.
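
For context, here is a minimal standalone sketch of the two same-named
helpers as they stood before this patch. The tci_write_reg64 signatures
mirror the hunks below; the typedefs, the simplified tci_write_reg body,
and the demo main() are illustrative assumptions, not the QEMU sources:

    /* Sketch only: TCG_TARGET_REG_BITS is a local toggle here, and
     * tci_write_reg is reduced to a plain register-file store. */
    #include <stdint.h>
    #include <stdio.h>

    #define TCG_TARGET_REG_BITS 64  /* flip to 32 for the pair version */

    typedef uintptr_t tcg_target_ulong;
    typedef unsigned TCGReg;

    static void tci_write_reg(tcg_target_ulong *regs, TCGReg index,
                              tcg_target_ulong value)
    {
        regs[index] = value;        /* simplified stand-in */
    }

    #if TCG_TARGET_REG_BITS == 32
    /* 32-bit hosts: a 64-bit value is split across a register pair. */
    static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                                uint32_t low_index, uint64_t value)
    {
        tci_write_reg(regs, low_index, value);
        tci_write_reg(regs, high_index, value >> 32);
    }
    #elif TCG_TARGET_REG_BITS == 64
    /* 64-bit hosts: a pure pass-through; the wrapper removed below. */
    static void
    tci_write_reg64(tcg_target_ulong *regs, TCGReg index, uint64_t value)
    {
        tci_write_reg(regs, index, value);
    }
    #endif

    int main(void)
    {
        tcg_target_ulong regs[4] = { 0 };
    #if TCG_TARGET_REG_BITS == 64
        tci_write_reg64(regs, 0, 0x1122334455667788ull);
    #else
        tci_write_reg64(regs, 1, 0, 0x1122334455667788ull);
    #endif
        printf("regs[0]=%llx regs[1]=%llx\n",
               (unsigned long long)regs[0], (unsigned long long)regs[1]);
        return 0;
    }

On a 64-bit host the wrapper adds nothing over tci_write_reg, which is
why every 64-bit call site below can call tci_write_reg directly.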
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  tcg/tci.c | 60
1 file changed, 27 insertions(+), 33 deletions(-)
@@ -124,12 +124,6 @@ static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
     tci_write_reg(regs, low_index, value);
     tci_write_reg(regs, high_index, value >> 32);
 }
-#elif TCG_TARGET_REG_BITS == 64
-static void
-tci_write_reg64(tcg_target_ulong *regs, TCGReg index, uint64_t value)
-{
-    tci_write_reg(regs, index, value);
-}
 #endif
 
 #if TCG_TARGET_REG_BITS == 32
@@ -559,7 +553,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t1 = tci_read_r64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
             condition = *tb_ptr++;
-            tci_write_reg64(regs, t0, tci_compare64(t1, t2, condition));
+            tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
             break;
 #endif
         case INDEX_op_mov_i32:
@@ -839,12 +833,12 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         case INDEX_op_mov_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
         case INDEX_op_tci_movi_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_i64(&tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 
         /* Load/store operations (64 bit). */
@@ -886,7 +880,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t0 = *tb_ptr++;
             t1 = tci_read_r(regs, &tb_ptr);
             t2 = tci_read_s32(&tb_ptr);
-            tci_write_reg64(regs, t0, *(uint64_t *)(t1 + t2));
+            tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
             break;
         case INDEX_op_st8_i64:
             t0 = tci_read_r8(regs, &tb_ptr);
@@ -920,19 +914,19 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 + t2);
+            tci_write_reg(regs, t0, t1 + t2);
             break;
         case INDEX_op_sub_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 - t2);
+            tci_write_reg(regs, t0, t1 - t2);
             break;
         case INDEX_op_mul_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 * t2);
+            tci_write_reg(regs, t0, t1 * t2);
             break;
 #if TCG_TARGET_HAS_div_i64
         case INDEX_op_div_i64:
@@ -951,19 +945,19 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 & t2);
+            tci_write_reg(regs, t0, t1 & t2);
             break;
         case INDEX_op_or_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 | t2);
+            tci_write_reg(regs, t0, t1 | t2);
             break;
         case INDEX_op_xor_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 ^ t2);
+            tci_write_reg(regs, t0, t1 ^ t2);
             break;
 
         /* Shift/rotate operations (64 bit). */
@@ -972,32 +966,32 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 << (t2 & 63));
+            tci_write_reg(regs, t0, t1 << (t2 & 63));
             break;
         case INDEX_op_shr_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1 >> (t2 & 63));
+            tci_write_reg(regs, t0, t1 >> (t2 & 63));
             break;
         case INDEX_op_sar_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, ((int64_t)t1 >> (t2 & 63)));
+            tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
             break;
 #if TCG_TARGET_HAS_rot_i64
         case INDEX_op_rotl_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, rol64(t1, t2 & 63));
+            tci_write_reg(regs, t0, rol64(t1, t2 & 63));
             break;
         case INDEX_op_rotr_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_ri64(regs, &tb_ptr);
             t2 = tci_read_ri64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, ror64(t1, t2 & 63));
+            tci_write_reg(regs, t0, ror64(t1, t2 & 63));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i64
@@ -1008,7 +1002,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tmp16 = *tb_ptr++;
             tmp8 = *tb_ptr++;
             tmp64 = (((1ULL << tmp8) - 1) << tmp16);
-            tci_write_reg64(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
+            tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
             break;
 #endif
         case INDEX_op_brcond_i64:
@@ -1026,28 +1020,28 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         case INDEX_op_ext8u_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r8(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 #endif
 #if TCG_TARGET_HAS_ext8s_i64
         case INDEX_op_ext8s_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r8s(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 #endif
 #if TCG_TARGET_HAS_ext16s_i64
         case INDEX_op_ext16s_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r16s(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 #endif
 #if TCG_TARGET_HAS_ext16u_i64
         case INDEX_op_ext16u_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r16(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 #endif
 #if TCG_TARGET_HAS_ext32s_i64
@@ -1056,7 +1050,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         case INDEX_op_ext_i32_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r32s(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 #if TCG_TARGET_HAS_ext32u_i64
         case INDEX_op_ext32u_i64:
@@ -1064,41 +1058,41 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         case INDEX_op_extu_i32_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r32(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, t1);
+            tci_write_reg(regs, t0, t1);
             break;
 #if TCG_TARGET_HAS_bswap16_i64
         case INDEX_op_bswap16_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r16(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, bswap16(t1));
+            tci_write_reg(regs, t0, bswap16(t1));
             break;
 #endif
 #if TCG_TARGET_HAS_bswap32_i64
         case INDEX_op_bswap32_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r32(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, bswap32(t1));
+            tci_write_reg(regs, t0, bswap32(t1));
             break;
 #endif
 #if TCG_TARGET_HAS_bswap64_i64
         case INDEX_op_bswap64_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, bswap64(t1));
+            tci_write_reg(regs, t0, bswap64(t1));
             break;
 #endif
 #if TCG_TARGET_HAS_not_i64
         case INDEX_op_not_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, ~t1);
+            tci_write_reg(regs, t0, ~t1);
             break;
 #endif
 #if TCG_TARGET_HAS_neg_i64
         case INDEX_op_neg_i64:
             t0 = *tb_ptr++;
             t1 = tci_read_r64(regs, &tb_ptr);
-            tci_write_reg64(regs, t0, -t1);
+            tci_write_reg(regs, t0, -t1);
             break;
 #endif
 #endif /* TCG_TARGET_REG_BITS == 64 */