path: root/target-arm/translate.c
author     Peter Maydell <peter.maydell@linaro.org>    2011-03-06 21:39:54 +0000
committer  Aurelien Jarno <aurelien@aurel32.net>       2011-03-07 09:26:10 +0100
commit     7d1b0095bff7157e856d1d0e6c4295641ced2752 (patch)
tree       8088bf2ef5590e49cb1513e0f0a8f1cffe8cbf2c /target-arm/translate.c
parent     27bfd83c336283d1f7a5345ee386c4cd7b80db61 (diff)
target-arm: Remove ad-hoc leak checking code
This commit removes the ad-hoc resource leak checking code from target-arm.
This includes replacing all uses of new_tmp() with tcg_temp_new_i32() and
all uses of dead_tmp() with tcg_temp_free_i32().

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
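The change is purely mechanical. As a minimal sketch (drawn from the removed wrappers and the gen_exception() hunk in the diff below), the old counting shims and the direct TCG calls that replace them look like this:

    /* Old: counting wrappers, removed by this commit */
    static int num_temps;

    static TCGv_i32 new_tmp(void)
    {
        num_temps++;                  /* count allocation for leak checking */
        return tcg_temp_new_i32();
    }

    static void dead_tmp(TCGv tmp)
    {
        tcg_temp_free(tmp);
        num_temps--;                  /* count release for leak checking */
    }

    /* New: call the TCG temporary API directly, e.g. in gen_exception() */
    TCGv tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    tcg_temp_free_i32(tmp);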
Diffstat (limited to 'target-arm/translate.c')
-rw-r--r--   target-arm/translate.c   698
1 file changed, 338 insertions(+), 360 deletions(-)
diff --git a/target-arm/translate.c b/target-arm/translate.c
index b08b636635..e01c8cd304 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -128,25 +128,9 @@ void arm_translate_init(void)
#include "helpers.h"
}
-static int num_temps;
-
-/* Allocate a temporary variable. */
-static TCGv_i32 new_tmp(void)
-{
- num_temps++;
- return tcg_temp_new_i32();
-}
-
-/* Release a temporary variable. */
-static void dead_tmp(TCGv tmp)
-{
- tcg_temp_free(tmp);
- num_temps--;
-}
-
static inline TCGv load_cpu_offset(int offset)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp, cpu_env, offset);
return tmp;
}
@@ -156,7 +140,7 @@ static inline TCGv load_cpu_offset(int offset)
static inline void store_cpu_offset(TCGv var, int offset)
{
tcg_gen_st_i32(var, cpu_env, offset);
- dead_tmp(var);
+ tcg_temp_free_i32(var);
}
#define store_cpu_field(var, name) \
@@ -181,7 +165,7 @@ static void load_reg_var(DisasContext *s, TCGv var, int reg)
/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
load_reg_var(s, tmp, reg);
return tmp;
}
@@ -195,7 +179,7 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
s->is_jmp = DISAS_JUMP;
}
tcg_gen_mov_i32(cpu_R[reg], var);
- dead_tmp(var);
+ tcg_temp_free_i32(var);
}
/* Value extensions. */
@@ -219,37 +203,37 @@ static inline void gen_set_cpsr(TCGv var, uint32_t mask)
static void gen_exception(int excp)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, excp);
gen_helper_exception(tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
static void gen_smul_dual(TCGv a, TCGv b)
{
- TCGv tmp1 = new_tmp();
- TCGv tmp2 = new_tmp();
+ TCGv tmp1 = tcg_temp_new_i32();
+ TCGv tmp2 = tcg_temp_new_i32();
tcg_gen_ext16s_i32(tmp1, a);
tcg_gen_ext16s_i32(tmp2, b);
tcg_gen_mul_i32(tmp1, tmp1, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_sari_i32(a, a, 16);
tcg_gen_sari_i32(b, b, 16);
tcg_gen_mul_i32(b, b, a);
tcg_gen_mov_i32(a, tmp1);
- dead_tmp(tmp1);
+ tcg_temp_free_i32(tmp1);
}
/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp, var, 8);
tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
tcg_gen_shli_i32(var, var, 8);
tcg_gen_andi_i32(var, var, 0xff00ff00);
tcg_gen_or_i32(var, var, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* Byteswap low halfword and sign extend. */
@@ -298,7 +282,7 @@ static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
TCGv_i64 tmp64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp64, b);
- dead_tmp(b);
+ tcg_temp_free_i32(b);
tcg_gen_shli_i64(tmp64, tmp64, 32);
tcg_gen_add_i64(a, tmp64, a);
@@ -312,7 +296,7 @@ static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
TCGv_i64 tmp64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp64, b);
- dead_tmp(b);
+ tcg_temp_free_i32(b);
tcg_gen_shli_i64(tmp64, tmp64, 32);
tcg_gen_sub_i64(a, tmp64, a);
@@ -329,9 +313,9 @@ static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp1, a);
- dead_tmp(a);
+ tcg_temp_free_i32(a);
tcg_gen_extu_i32_i64(tmp2, b);
- dead_tmp(b);
+ tcg_temp_free_i32(b);
tcg_gen_mul_i64(tmp1, tmp1, tmp2);
tcg_temp_free_i64(tmp2);
return tmp1;
@@ -343,9 +327,9 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp1, a);
- dead_tmp(a);
+ tcg_temp_free_i32(a);
tcg_gen_ext_i32_i64(tmp2, b);
- dead_tmp(b);
+ tcg_temp_free_i32(b);
tcg_gen_mul_i64(tmp1, tmp1, tmp2);
tcg_temp_free_i64(tmp2);
return tmp1;
@@ -354,11 +338,11 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp, var, 16);
tcg_gen_shli_i32(var, var, 16);
tcg_gen_or_i32(var, var, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
@@ -370,15 +354,15 @@ static void gen_swap_half(TCGv var)
static void gen_add16(TCGv t0, TCGv t1)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_xor_i32(tmp, t0, t1);
tcg_gen_andi_i32(tmp, tmp, 0x8000);
tcg_gen_andi_i32(t0, t0, ~0x8000);
tcg_gen_andi_i32(t1, t1, ~0x8000);
tcg_gen_add_i32(t0, t0, t1);
tcg_gen_xor_i32(t0, t0, tmp);
- dead_tmp(tmp);
- dead_tmp(t1);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t1);
}
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
@@ -386,10 +370,10 @@ static void gen_add16(TCGv t0, TCGv t1)
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp, var, 31);
gen_set_CF(tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* Set N and Z flags from var. */
@@ -406,7 +390,7 @@ static void gen_adc(TCGv t0, TCGv t1)
tcg_gen_add_i32(t0, t0, t1);
tmp = load_cpu_field(CF);
tcg_gen_add_i32(t0, t0, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* dest = T0 + T1 + CF. */
@@ -416,7 +400,7 @@ static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
tcg_gen_add_i32(dest, t0, t1);
tmp = load_cpu_field(CF);
tcg_gen_add_i32(dest, dest, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* dest = T0 - T1 + CF - 1. */
@@ -427,7 +411,7 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
tmp = load_cpu_field(CF);
tcg_gen_add_i32(dest, dest, tmp);
tcg_gen_subi_i32(dest, dest, 1);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* FIXME: Implement this natively. */
@@ -435,7 +419,7 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
static void shifter_out_im(TCGv var, int shift)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
if (shift == 0) {
tcg_gen_andi_i32(tmp, var, 1);
} else {
@@ -444,7 +428,7 @@ static void shifter_out_im(TCGv var, int shift)
tcg_gen_andi_i32(tmp, tmp, 1);
}
gen_set_CF(tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* Shift by immediate. Includes special handling for shift == 0. */
@@ -492,7 +476,7 @@ static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
tcg_gen_shri_i32(var, var, 1);
tcg_gen_shli_i32(tmp, tmp, 31);
tcg_gen_or_i32(var, var, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
}
};
@@ -516,7 +500,7 @@ static inline void gen_arm_shift_reg(TCGv var, int shiftop,
tcg_gen_rotr_i32(var, var, shift); break;
}
}
- dead_tmp(shift);
+ tcg_temp_free_i32(shift);
}
#define PAS_OP(pfx) \
@@ -655,7 +639,7 @@ static void gen_test_cc(int cc, int label)
inv = gen_new_label();
tmp = load_cpu_field(CF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
gen_set_label(inv);
@@ -663,7 +647,7 @@ static void gen_test_cc(int cc, int label)
case 9: /* ls: !C || Z */
tmp = load_cpu_field(CF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
@@ -671,43 +655,43 @@ static void gen_test_cc(int cc, int label)
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 11: /* lt: N != V -> N ^ V != 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 12: /* gt: !Z && N == V */
inv = gen_new_label();
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
gen_set_label(inv);
break;
case 13: /* le: Z || N != V */
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
abort();
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
static const uint8_t table_logic_cc[16] = {
@@ -736,10 +720,10 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
s->is_jmp = DISAS_UPDATE;
if (s->thumb != (addr & 1)) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, addr & 1);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
@@ -768,31 +752,31 @@ static inline void store_reg_bx(CPUState *env, DisasContext *s,
static inline TCGv gen_ld8s(TCGv addr, int index)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld8s(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld8u(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld16s(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld16u(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld32u(tmp, addr, index);
return tmp;
}
@@ -805,17 +789,17 @@ static inline TCGv_i64 gen_ld64(TCGv addr, int index)
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
tcg_gen_qemu_st8(val, addr, index);
- dead_tmp(val);
+ tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
tcg_gen_qemu_st16(val, addr, index);
- dead_tmp(val);
+ tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
tcg_gen_qemu_st32(val, addr, index);
- dead_tmp(val);
+ tcg_temp_free_i32(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
@@ -859,7 +843,7 @@ static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
tcg_gen_sub_i32(var, var, offset);
else
tcg_gen_add_i32(var, var, offset);
- dead_tmp(offset);
+ tcg_temp_free_i32(offset);
}
}
@@ -887,7 +871,7 @@ static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
tcg_gen_sub_i32(var, var, offset);
else
tcg_gen_add_i32(var, var, offset);
- dead_tmp(offset);
+ tcg_temp_free_i32(offset);
}
}
@@ -1065,7 +1049,7 @@ neon_reg_offset (int reg, int n)
static TCGv neon_load_reg(int reg, int pass)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
return tmp;
}
@@ -1073,7 +1057,7 @@ static TCGv neon_load_reg(int reg, int pass)
static void neon_store_reg(int reg, int pass, TCGv var)
{
tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
- dead_tmp(var);
+ tcg_temp_free_i32(var);
}
static inline void neon_load_reg64(TCGv_i64 var, int reg)
@@ -1129,7 +1113,7 @@ static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
static inline TCGv iwmmxt_load_creg(int reg)
{
- TCGv var = new_tmp();
+ TCGv var = tcg_temp_new_i32();
tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
return var;
}
@@ -1137,7 +1121,7 @@ static inline TCGv iwmmxt_load_creg(int reg)
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
- dead_tmp(var);
+ tcg_temp_free_i32(var);
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
@@ -1268,7 +1252,7 @@ static void gen_op_iwmmxt_set_cup(void)
static void gen_op_iwmmxt_setpsr_nz(void)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
@@ -1300,7 +1284,7 @@ static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
if (insn & (1 << 21))
store_reg(s, rd, tmp);
else
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else if (insn & (1 << 21)) {
/* Post indexed */
tcg_gen_mov_i32(dest, tmp);
@@ -1326,13 +1310,13 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
tmp = iwmmxt_load_creg(rd);
}
} else {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
iwmmxt_load_reg(cpu_V0, rd);
tcg_gen_trunc_i64_i32(tmp, cpu_V0);
}
tcg_gen_andi_i32(tmp, tmp, mask);
tcg_gen_mov_i32(dest, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 0;
}
@@ -1364,14 +1348,14 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
}
wrd = (insn >> 12) & 0xf;
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
if (gen_iwmmxt_address(s, insn, addr)) {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
return 1;
}
if (insn & ARM_CP_RW_BIT) {
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
iwmmxt_store_creg(wrd, tmp);
} else {
@@ -1392,7 +1376,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
}
if (i) {
tcg_gen_extu_i32_i64(cpu_M0, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
}
@@ -1402,10 +1386,10 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_st32(tmp, addr, IS_USER(s));
} else {
gen_op_iwmmxt_movq_M0_wRn(wrd);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WSTRD */
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
} else { /* WSTRW wRd */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
@@ -1422,7 +1406,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
}
}
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
return 0;
}
@@ -1457,7 +1441,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
tmp = iwmmxt_load_creg(wrd);
tmp2 = load_reg(s, rd);
tcg_gen_andc_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
iwmmxt_store_creg(wrd, tmp);
break;
case ARM_IWMMXT_wCGR0:
@@ -1670,7 +1654,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
tcg_gen_andi_i32(tmp, tmp, 7);
iwmmxt_load_reg(cpu_V1, rd1);
gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
@@ -1701,7 +1685,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
tcg_temp_free(tmp3);
tcg_temp_free(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
@@ -1711,7 +1695,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
if (rd == 15 || ((insn >> 22) & 3) == 3)
return 1;
gen_op_iwmmxt_movq_M0_wRn(wrd);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
switch ((insn >> 22) & 3) {
case 0:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
@@ -1755,7 +1739,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
}
tcg_gen_shli_i32(tmp, tmp, 28);
gen_set_nzcv(tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
if (((insn >> 6) & 3) == 3)
@@ -1774,7 +1758,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
break;
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
@@ -1782,7 +1766,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
return 1;
tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp2, tmp);
switch ((insn >> 22) & 3) {
case 0:
@@ -1803,8 +1787,8 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
break;
}
gen_set_nzcv(tmp);
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
break;
case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
wrd = (insn >> 12) & 0xf;
@@ -1830,7 +1814,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
return 1;
tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp2, tmp);
switch ((insn >> 22) & 3) {
case 0:
@@ -1851,8 +1835,8 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
break;
}
gen_set_nzcv(tmp);
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
break;
case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
rd = (insn >> 12) & 0xf;
@@ -1860,7 +1844,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
return 1;
gen_op_iwmmxt_movq_M0_wRn(rd0);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
switch ((insn >> 22) & 3) {
case 0:
gen_helper_iwmmxt_msbb(tmp, cpu_M0);
@@ -1975,9 +1959,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 1;
}
switch ((insn >> 22) & 3) {
@@ -1991,7 +1975,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
@@ -2003,9 +1987,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 1;
}
switch ((insn >> 22) & 3) {
@@ -2019,7 +2003,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
@@ -2031,9 +2015,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 1;
}
switch ((insn >> 22) & 3) {
@@ -2047,7 +2031,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
@@ -2059,31 +2043,31 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
switch ((insn >> 22) & 3) {
case 1:
if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 1;
}
gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 1;
}
gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 1;
}
gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
@@ -2324,12 +2308,12 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
break;
default:
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
return 1;
}
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
@@ -2378,8 +2362,8 @@ static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
default:
return 1;
}
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
gen_op_iwmmxt_movq_wRn_M0(acc);
return 0;
@@ -2425,7 +2409,7 @@ static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
if (!env->cp[cp].cp_read)
return 1;
gen_set_pc_im(s->pc);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(insn);
gen_helper_get_cp(tmp, cpu_env, tmp2);
tcg_temp_free(tmp2);
@@ -2438,7 +2422,7 @@ static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
tmp2 = tcg_const_i32(insn);
gen_helper_set_cp(cpu_env, tmp2, tmp);
tcg_temp_free(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
return 0;
}
@@ -2505,7 +2489,7 @@ static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, ui
store_cpu_field(tmp, cp15.c13_tls3);
break;
default:
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 0;
}
}
@@ -2577,17 +2561,17 @@ static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
tmp2 = tcg_const_i32(insn);
if (insn & ARM_CP_RW_BIT) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_helper_get_cp15(tmp, cpu_env, tmp2);
/* If the destination register is r15 then sets condition codes. */
if (rd != 15)
store_reg(s, rd, tmp);
else
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else {
tmp = load_reg(s, rd);
gen_helper_set_cp15(cpu_env, tmp2, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
/* Normally we would always end the TB here, but Linux
* arch/arm/mach-pxa/sleep.S expects two instructions following
* an MMU enable to execute from cache. Imitate this behaviour. */
@@ -2622,7 +2606,7 @@ static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
/* Move between integer and VFP cores. */
static TCGv gen_vfp_mrs(void)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp, cpu_F0s);
return tmp;
}
@@ -2630,12 +2614,12 @@ static TCGv gen_vfp_mrs(void)
static void gen_vfp_msr(TCGv tmp)
{
tcg_gen_mov_i32(cpu_F0s, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_u8(TCGv var, int shift)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
if (shift)
tcg_gen_shri_i32(var, var, shift);
tcg_gen_ext8u_i32(var, var);
@@ -2643,25 +2627,25 @@ static void gen_neon_dup_u8(TCGv var, int shift)
tcg_gen_or_i32(var, var, tmp);
tcg_gen_shli_i32(tmp, var, 16);
tcg_gen_or_i32(var, var, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_low16(TCGv var)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_ext16u_i32(var, var);
tcg_gen_shli_i32(tmp, var, 16);
tcg_gen_or_i32(var, var, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
static void gen_neon_dup_high16(TCGv var)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(var, var, 0xffff0000);
tcg_gen_shri_i32(tmp, var, 16);
tcg_gen_or_i32(var, var, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* Disassemble a VFP instruction. Returns nonzero if an error occured
@@ -2756,7 +2740,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_neon_dup_low16(tmp);
}
for (n = 0; n <= pass * 2; n++) {
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp2, tmp);
neon_store_reg(rn, n, tmp2);
}
@@ -2767,12 +2751,12 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 0:
tmp2 = neon_load_reg(rn, pass);
gen_bfi(tmp, tmp2, tmp, offset, 0xff);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
break;
case 1:
tmp2 = neon_load_reg(rn, pass);
gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
break;
case 2:
break;
@@ -2818,7 +2802,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
} else {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_helper_vfp_get_fpscr(tmp, cpu_env);
}
break;
@@ -2839,7 +2823,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
if (rd == 15) {
/* Set the 4 flag bits in the CPSR. */
gen_set_nzcv(tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else {
store_reg(s, rd, tmp);
}
@@ -2857,7 +2841,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
case ARM_VFP_FPSCR:
gen_helper_vfp_set_fpscr(cpu_env, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_lookup_tb(s);
break;
case ARM_VFP_FPEXC:
@@ -3084,7 +3068,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
tmp = gen_vfp_mrs();
tcg_gen_ext16u_i32(tmp, tmp);
gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 5: /* vcvtt.f32.f16 */
if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
@@ -3092,31 +3076,31 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
tmp = gen_vfp_mrs();
tcg_gen_shri_i32(tmp, tmp, 16);
gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 6: /* vcvtb.f16.f32 */
if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
return 1;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
gen_mov_F0_vreg(0, rd);
tmp2 = gen_vfp_mrs();
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
tcg_gen_or_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
gen_vfp_msr(tmp);
break;
case 7: /* vcvtt.f16.f32 */
if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
return 1;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
tcg_gen_shli_i32(tmp, tmp, 16);
gen_mov_F0_vreg(0, rd);
tmp2 = gen_vfp_mrs();
tcg_gen_ext16u_i32(tmp2, tmp2);
tcg_gen_or_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
gen_vfp_msr(tmp);
break;
case 8: /* cmp */
@@ -3310,7 +3294,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
else
rd = VFP_SREG_D(insn);
if (s->thumb && rn == 15) {
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, s->pc & ~2);
} else {
addr = load_reg(s, rn);
@@ -3328,7 +3312,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_mov_F0_vreg(dp, rd);
gen_vfp_st(s, dp, addr);
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
} else {
/* load/store multiple */
if (dp)
@@ -3368,7 +3352,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
tcg_gen_addi_i32(addr, addr, offset);
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
}
}
@@ -3467,7 +3451,7 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
} else {
gen_set_cpsr(t0, mask);
}
- dead_tmp(t0);
+ tcg_temp_free_i32(t0);
gen_lookup_tb(s);
return 0;
}
@@ -3476,7 +3460,7 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
TCGv tmp;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
return gen_set_psr(s, mask, spsr, tmp);
}
@@ -3488,7 +3472,7 @@ static void gen_exception_return(DisasContext *s, TCGv pc)
store_reg(s, 15, pc);
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, 0xffffffff);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
s->is_jmp = DISAS_UPDATE;
}
@@ -3496,7 +3480,7 @@ static void gen_exception_return(DisasContext *s, TCGv pc)
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
gen_set_cpsr(cpsr, 0xffffffff);
- dead_tmp(cpsr);
+ tcg_temp_free_i32(cpsr);
store_reg(s, 15, pc);
s->is_jmp = DISAS_UPDATE;
}
@@ -3506,7 +3490,7 @@ gen_set_condexec (DisasContext *s)
{
if (s->condexec_mask) {
uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
store_cpu_field(tmp, condexec_bits);
}
@@ -3612,7 +3596,7 @@ static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
static TCGv neon_load_scratch(int scratch)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
return tmp;
}
@@ -3620,7 +3604,7 @@ static TCGv neon_load_scratch(int scratch)
static void neon_store_scratch(int scratch, TCGv var)
{
tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
- dead_tmp(var);
+ tcg_temp_free_i32(var);
}
static inline TCGv neon_get_scalar(int size, int reg)
@@ -3721,8 +3705,8 @@ static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
TCGv rd, tmp;
- rd = new_tmp();
- tmp = new_tmp();
+ rd = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
tcg_gen_shli_i32(rd, t0, 8);
tcg_gen_andi_i32(rd, rd, 0xff00ff00);
@@ -3735,16 +3719,16 @@ static void gen_neon_trn_u8(TCGv t0, TCGv t1)
tcg_gen_or_i32(t1, t1, tmp);
tcg_gen_mov_i32(t0, rd);
- dead_tmp(tmp);
- dead_tmp(rd);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(rd);
}
static void gen_neon_trn_u16(TCGv t0, TCGv t1)
{
TCGv rd, tmp;
- rd = new_tmp();
- tmp = new_tmp();
+ rd = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
tcg_gen_shli_i32(rd, t0, 16);
tcg_gen_andi_i32(tmp, t1, 0xffff);
@@ -3754,8 +3738,8 @@ static void gen_neon_trn_u16(TCGv t0, TCGv t1)
tcg_gen_or_i32(t1, t1, tmp);
tcg_gen_mov_i32(t0, rd);
- dead_tmp(tmp);
- dead_tmp(rd);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(rd);
}
@@ -3804,7 +3788,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
rn = (insn >> 16) & 0xf;
rm = insn & 0xf;
load = (insn & (1 << 21)) != 0;
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
if ((insn & (1 << 23)) == 0) {
/* Load store all elements. */
op = (insn >> 8) & 0xf;
@@ -3856,11 +3840,11 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
tcg_gen_addi_i32(addr, addr, stride);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
neon_store_reg(rd, pass, tmp);
} else {
tmp = neon_load_reg(rd, pass);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp2, tmp, 16);
gen_st16(tmp, addr, IS_USER(s));
tcg_gen_addi_i32(addr, addr, stride);
@@ -3878,14 +3862,14 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
} else {
tcg_gen_shli_i32(tmp, tmp, n * 8);
tcg_gen_or_i32(tmp2, tmp2, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
}
neon_store_reg(rd, pass, tmp2);
} else {
tmp2 = neon_load_reg(rd, pass);
for (n = 0; n < 4; n++) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (n == 0) {
tcg_gen_mov_i32(tmp, tmp2);
} else {
@@ -3894,7 +3878,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_st8(tmp, addr, IS_USER(s));
tcg_gen_addi_i32(addr, addr, stride);
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
}
}
@@ -3931,7 +3915,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
abort();
}
tcg_gen_addi_i32(addr, addr, 1 << size);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp2, tmp);
neon_store_reg(rd, 0, tmp2);
neon_store_reg(rd, 1, tmp);
@@ -3977,7 +3961,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
if (size != 2) {
tmp2 = neon_load_reg(rd, pass);
gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
neon_store_reg(rd, pass, tmp);
} else { /* Store */
@@ -4002,7 +3986,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
stride = nregs * (1 << size);
}
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
if (rm != 15) {
TCGv base;
@@ -4013,7 +3997,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
TCGv index;
index = load_reg(s, rm);
tcg_gen_add_i32(base, base, index);
- dead_tmp(index);
+ tcg_temp_free_i32(index);
}
store_reg(s, rn, base);
}
@@ -4119,7 +4103,7 @@ static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
default: abort();
}
}
- dead_tmp(src);
+ tcg_temp_free_i32(src);
}
static inline void gen_neon_addl(int size)
@@ -4184,8 +4168,8 @@ static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
/* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
Don't forget to clean them now. */
if (size < 2) {
- dead_tmp(a);
- dead_tmp(b);
+ tcg_temp_free_i32(a);
+ tcg_temp_free_i32(b);
}
}
@@ -4389,17 +4373,17 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 5: /* VBSL */
tmp3 = neon_load_reg(rd, pass);
gen_neon_bsl(tmp, tmp, tmp2, tmp3);
- dead_tmp(tmp3);
+ tcg_temp_free_i32(tmp3);
break;
case 6: /* VBIT */
tmp3 = neon_load_reg(rd, pass);
gen_neon_bsl(tmp, tmp, tmp3, tmp2);
- dead_tmp(tmp3);
+ tcg_temp_free_i32(tmp3);
break;
case 7: /* VBIF */
tmp3 = neon_load_reg(rd, pass);
gen_neon_bsl(tmp, tmp3, tmp, tmp2);
- dead_tmp(tmp3);
+ tcg_temp_free_i32(tmp3);
break;
}
break;
@@ -4438,7 +4422,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
case 15: /* VABA */
GEN_NEON_INTEGER_OP(abd);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
break;
@@ -4479,7 +4463,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
default: return 1;
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tmp2 = neon_load_reg(rd, pass);
if (u) { /* VMLS */
gen_neon_rsb(size, tmp, tmp2);
@@ -4551,7 +4535,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 27: /* Float multiply. */
gen_helper_neon_mul_f32(tmp, tmp, tmp2);
if (!u) {
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tmp2 = neon_load_reg(rd, pass);
if (size == 0) {
gen_helper_neon_add_f32(tmp, tmp, tmp2);
@@ -4593,7 +4577,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
default:
abort();
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
/* Save the result. For elementwise operations we can put it
straight into the destination register. For pairwise operations
@@ -4726,7 +4710,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
} else { /* size < 3 */
/* Operands in T0 and T1. */
tmp = neon_load_reg(rm, pass);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, imm);
switch (op) {
case 0: /* VSHR */
@@ -4775,13 +4759,13 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
GEN_NEON_INTEGER_OP_ENV(qshl);
break;
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (op == 1 || op == 3) {
/* Accumulate. */
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
} else if (op == 4 || (op == 5 && u)) {
/* Insert */
switch (size) {
@@ -4817,7 +4801,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
tcg_gen_andi_i32(tmp, tmp, mask);
tcg_gen_andi_i32(tmp2, tmp2, ~mask);
tcg_gen_or_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
neon_store_reg(rd, pass, tmp);
}
@@ -4853,7 +4837,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
}
}
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
neon_store_reg(rd, pass, tmp);
} /* for pass */
@@ -4885,9 +4869,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_neon_shift_narrow(size, tmp3, tmp2, q,
input_unsigned);
tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
- dead_tmp(tmp);
- dead_tmp(tmp3);
- tmp = new_tmp();
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp3);
+ tmp = tcg_temp_new_i32();
gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
neon_store_reg(rd, pass, tmp);
} /* for pass */
@@ -5016,7 +5000,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
}
} else {
/* VMOV, VMVN. */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (op == 14 && invert) {
uint32_t val;
val = 0;
@@ -5133,8 +5117,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
default: abort();
}
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
break;
case 8: case 9: case 10: case 11: case 12: case 13:
/* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
@@ -5142,8 +5126,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
case 14: /* Polynomial VMULL */
gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
break;
default: /* 15 is RESERVED. */
return 1;
@@ -5175,7 +5159,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
neon_store_reg64(cpu_V0, rd + pass);
} else if (op == 4 || op == 6) {
/* Narrowing operation. */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (!u) {
switch (size) {
case 0:
@@ -5255,7 +5239,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
default: return 1;
}
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (op < 8) {
/* Accumulate. */
tmp2 = neon_load_reg(rd, pass);
@@ -5275,7 +5259,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
default:
abort();
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
neon_store_reg(rd, pass, tmp);
}
@@ -5292,7 +5276,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
tmp2 = neon_get_scalar(size, rm);
/* We need a copy of tmp2 because gen_neon_mull
* deletes it during pass 0. */
- tmp4 = new_tmp();
+ tmp4 = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp4, tmp2);
tmp3 = neon_load_reg(rn, 1);
@@ -5472,7 +5456,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
TCGV_UNUSED(tmp2);
for (pass = 0; pass < 2; pass++) {
neon_load_reg64(cpu_V0, rm + pass);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_neon_narrow_op(op == 36, q, size, tmp, cpu_V0);
if (pass == 0) {
tmp2 = tmp;
@@ -5498,8 +5482,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 44: /* VCVT.F16.F32 */
if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
return 1;
- tmp = new_tmp();
- tmp2 = new_tmp();
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
@@ -5510,17 +5494,17 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
neon_store_reg(rd, 0, tmp2);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp2, tmp2, tmp);
neon_store_reg(rd, 1, tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 46: /* VCVT.F32.F16 */
if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
return 1;
- tmp3 = new_tmp();
+ tmp3 = tcg_temp_new_i32();
tmp = neon_load_reg(rm, 0);
tmp2 = neon_load_reg(rm, 1);
tcg_gen_ext16u_i32(tmp3, tmp);
@@ -5529,15 +5513,15 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
tcg_gen_shri_i32(tmp3, tmp, 16);
gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tcg_gen_ext16u_i32(tmp3, tmp2);
gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
tcg_gen_shri_i32(tmp3, tmp2, 16);
gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
- dead_tmp(tmp2);
- dead_tmp(tmp3);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
break;
default:
elementwise:
@@ -5735,18 +5719,18 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 0);
} else {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
}
tmp2 = neon_load_reg(rm, 0);
tmp4 = tcg_const_i32(rn);
tmp5 = tcg_const_i32(n);
gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 1);
} else {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
}
tmp3 = neon_load_reg(rm, 1);
@@ -5755,7 +5739,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tmp4);
neon_store_reg(rd, 0, tmp2);
neon_store_reg(rd, 1, tmp3);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else if ((insn & 0x380) == 0) {
/* VDUP */
if (insn & (1 << 19)) {
@@ -5772,11 +5756,11 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_neon_dup_low16(tmp);
}
for (pass = 0; pass < (q ? 4 : 2); pass++) {
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_mov_i32(tmp2, tmp);
neon_store_reg(rd, pass, tmp2);
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else {
return 1;
}
@@ -5833,7 +5817,7 @@ static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
return 1;
tmp = load_reg(s, rt);
gen_helper_set_teecr(cpu_env, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
return 0;
}
if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
@@ -5894,10 +5878,10 @@ static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
TCGv tmp;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, val);
store_reg(s, rlow, tmp);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_shri_i64(val, val, 32);
tcg_gen_trunc_i64_i32(tmp, val);
store_reg(s, rhigh, tmp);
@@ -5913,7 +5897,7 @@ static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
tmp = tcg_temp_new_i64();
tmp2 = load_reg(s, rlow);
tcg_gen_extu_i32_i64(tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_add_i64(val, val, tmp);
tcg_temp_free_i64(tmp);
}
@@ -5930,8 +5914,8 @@ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
tmph = load_reg(s, rhigh);
tmp = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
- dead_tmp(tmpl);
- dead_tmp(tmph);
+ tcg_temp_free_i32(tmpl);
+ tcg_temp_free_i32(tmph);
tcg_gen_add_i64(val, val, tmp);
tcg_temp_free_i64(tmp);
}
@@ -5939,10 +5923,10 @@ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
/* Set N and Z flags from a 64-bit value. */
static void gen_logicq_cc(TCGv_i64 val)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
gen_helper_logicq_cc(tmp, val);
gen_logic_CC(tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
/* Load/Store exclusive instructions are implemented by remembering
@@ -5976,10 +5960,10 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
tcg_gen_mov_i32(cpu_exclusive_val, tmp);
store_reg(s, rt, tmp);
if (size == 3) {
- TCGv tmp2 = new_tmp();
+ TCGv tmp2 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
tmp = gen_ld32(tmp2, IS_USER(s));
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_mov_i32(cpu_exclusive_high, tmp);
store_reg(s, rt2, tmp);
}
@@ -6032,14 +6016,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
abort();
}
tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
if (size == 3) {
- TCGv tmp2 = new_tmp();
+ TCGv tmp2 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
tmp = gen_ld32(tmp2, IS_USER(s));
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
tmp = load_reg(s, rt);
switch (size) {
@@ -6160,7 +6144,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
goto illegal_op;
ARCH(6);
op1 = (insn & 0x1f);
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
tmp = tcg_const_i32(op1);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
@@ -6193,9 +6177,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp = tcg_const_i32(op1);
gen_helper_set_r13_banked(cpu_env, tmp, addr);
tcg_temp_free_i32(tmp);
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
return;
} else if ((insn & 0x0e50ffe0) == 0x08100a00) {
@@ -6233,7 +6217,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_addi_i32(addr, addr, offset);
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
gen_rfe(s, tmp, tmp2);
return;
@@ -6242,7 +6226,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
int32_t offset;
val = (uint32_t)s->pc;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
store_reg(s, 14, tmp);
/* Sign-extend the 24-bit offset */
@@ -6306,7 +6290,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
if ((insn & (1 << 22)) == 0) {
/* MOVW */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
} else {
/* MOVT */
@@ -6353,7 +6337,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
goto illegal_op;
tmp = load_cpu_field(spsr);
} else {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_helper_cpsr_read(tmp);
}
store_reg(s, rd, tmp);
@@ -6390,7 +6374,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* branch link/exchange thumb (blx) */
tmp = load_reg(s, rm);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, s->pc);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
@@ -6406,7 +6390,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_helper_sub_saturate(tmp, tmp, tmp2);
else
gen_helper_add_saturate(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 7:
@@ -6436,13 +6420,13 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_sxth(tmp2);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
gen_helper_add_setq(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
} else {
@@ -6450,11 +6434,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (op1 == 2) {
tmp64 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_addq(s, tmp64, rn, rd);
gen_storeq_reg(s, rn, rd, tmp64);
tcg_temp_free_i64(tmp64);
@@ -6462,7 +6446,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (op1 == 0) {
tmp2 = load_reg(s, rn);
gen_helper_add_setq(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
}
@@ -6488,7 +6472,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (shift) {
val = (val >> shift) | (val << (32 - shift));
}
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
if (logic_cc && shift) {
gen_set_CF_bit31(tmp2);
@@ -6591,26 +6575,26 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_and_i32(tmp, tmp, tmp2);
gen_logic_CC(tmp);
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 0x09:
if (set_cc) {
tcg_gen_xor_i32(tmp, tmp, tmp2);
gen_logic_CC(tmp);
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 0x0a:
if (set_cc) {
gen_helper_sub_cc(tmp, tmp, tmp2);
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 0x0b:
if (set_cc) {
gen_helper_add_cc(tmp, tmp, tmp2);
}
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 0x0c:
tcg_gen_or_i32(tmp, tmp, tmp2);
@@ -6650,7 +6634,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
break;
}
if (op1 != 0x0f && op1 != 0x0d) {
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
} else {
/* other instructions */
@@ -6673,18 +6657,18 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp = load_reg(s, rs);
tmp2 = load_reg(s, rm);
tcg_gen_mul_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
/* Subtract (mls) */
ARCH(6T2);
tmp2 = load_reg(s, rn);
tcg_gen_sub_i32(tmp, tmp2, tmp);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
} else if (insn & (1 << 21)) {
/* Add */
tmp2 = load_reg(s, rn);
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
if (insn & (1 << 20))
gen_logic_CC(tmp);
@@ -6788,7 +6772,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp2 = gen_ld32(addr, IS_USER(s));
gen_st32(tmp, addr, IS_USER(s));
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
store_reg(s, rd, tmp2);
}
}
@@ -6855,7 +6839,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_addi_i32(addr, addr, address_offset);
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
if (load) {
/* Complete the load. */
@@ -6884,7 +6868,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if ((op1 & 3) == 0 || sh == 5 || sh == 6)
goto illegal_op;
gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 1:
@@ -6908,7 +6892,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
}
tcg_gen_or_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00200020) == 0x00200000) {
/* [us]sat */
@@ -6944,11 +6928,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* Select bytes. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
- tmp3 = new_tmp();
+ tmp3 = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
- dead_tmp(tmp3);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x000003e0) == 0x00000060) {
tmp = load_reg(s, rm);
@@ -6973,7 +6957,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_add16(tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
}
store_reg(s, rd, tmp);
@@ -7018,7 +7002,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
@@ -7032,12 +7016,12 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
/* smlald, smlsld */
tmp64 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_addq(s, tmp64, rd, rn);
gen_storeq_reg(s, rd, rn, tmp64);
tcg_temp_free_i64(tmp64);
@@ -7047,7 +7031,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
{
tmp2 = load_reg(s, rd);
gen_helper_add_setq(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
store_reg(s, rn, tmp);
}
@@ -7061,11 +7045,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rs);
gen_helper_usad8(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (rd != 15) {
tmp2 = load_reg(s, rd);
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
store_reg(s, rn, tmp);
break;
@@ -7076,7 +7060,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
i = (insn >> 16) & 0x1f;
i = i + 1 - shift;
if (rm == 15) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rm);
@@ -7084,7 +7068,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (i != 32) {
tmp2 = load_reg(s, rd);
gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
break;
@@ -7150,7 +7134,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
} else if (insn & (1 << 21)) {
store_reg(s, rn, tmp2);
} else {
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
if (insn & (1 << 20)) {
/* Complete the load. */
@@ -7216,7 +7200,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(tmp2, tmp);
tcg_temp_free_i32(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else if (i == rn) {
loaded_var = tmp;
loaded_base = 1;
@@ -7228,10 +7212,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (i == 15) {
/* special case: r15 = PC + 8 */
val = (long)s->pc + 4;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
} else if (user) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
gen_helper_get_user_reg(tmp, tmp2);
tcg_temp_free_i32(tmp2);
@@ -7267,7 +7251,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
}
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
if (loaded_base) {
store_reg(s, rn, loaded_var);
@@ -7276,7 +7260,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, 0xffffffff);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
s->is_jmp = DISAS_UPDATE;
}
}
@@ -7289,7 +7273,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* branch (and link) */
val = (int32_t)s->pc;
if (insn & (1 << 24)) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, val);
store_reg(s, 14, tmp);
}
@@ -7427,7 +7411,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_addi_i32(tmp, tmp, offset);
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, s->pc | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
@@ -7439,7 +7423,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tmp = load_reg(s, 14);
tcg_gen_addi_i32(tmp, tmp, offset);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, s->pc | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
@@ -7478,7 +7462,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (insn & 0x01200000) {
/* Load/store doubleword. */
if (rn == 15) {
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, s->pc & ~3);
} else {
addr = load_reg(s, rn);
@@ -7512,7 +7496,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_addi_i32(addr, addr, offset - 4);
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
} else if ((insn & (1 << 23)) == 0) {
/* Load/store exclusive word. */
@@ -7528,7 +7512,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
} else if ((insn & (1 << 6)) == 0) {
/* Table Branch. */
if (rn == 15) {
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, s->pc);
} else {
addr = load_reg(s, rn);
@@ -7538,13 +7522,13 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (insn & (1 << 4)) {
/* tbh */
tcg_gen_add_i32(addr, addr, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tmp = gen_ld16u(addr, IS_USER(s));
} else { /* tbb */
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
tmp = gen_ld8u(addr, IS_USER(s));
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
tcg_gen_addi_i32(tmp, tmp, s->pc);
store_reg(s, 15, tmp);
@@ -7588,13 +7572,13 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
}
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
gen_rfe(s, tmp, tmp2);
} else {
/* srs */
op = (insn & 0x1f);
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
tmp = tcg_const_i32(op);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
@@ -7604,7 +7588,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tmp = load_reg(s, 14);
gen_st32(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
gen_helper_cpsr_read(tmp);
gen_st32(tmp, addr, 0);
if (insn & (1 << 21)) {
@@ -7617,7 +7601,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_helper_set_r13_banked(cpu_env, tmp, addr);
tcg_temp_free_i32(tmp);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
}
} else {
@@ -7661,7 +7645,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
goto illegal_op;
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
}
}
@@ -7689,12 +7673,12 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
}
tcg_gen_or_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else {
/* Data processing register constant shift. */
if (rn == 15) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rn);
@@ -7708,11 +7692,11 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
goto illegal_op;
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (rd != 15) {
store_reg(s, rd, tmp);
} else {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
}
break;
@@ -7756,7 +7740,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_add16(tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
}
store_reg(s, rd, tmp);
@@ -7769,7 +7753,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 3: /* Other data processing. */
@@ -7784,7 +7768,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_helper_sub_saturate(tmp, tmp2, tmp);
else
gen_helper_add_saturate(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, rn);
switch (op) {
@@ -7802,11 +7786,11 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
break;
case 0x10: /* sel */
tmp2 = load_reg(s, rm);
- tmp3 = new_tmp();
+ tmp3 = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
- dead_tmp(tmp3);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp2);
break;
case 0x18: /* clz */
gen_helper_clz(tmp, tmp);
@@ -7824,23 +7808,23 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
switch ((insn >> 20) & 7) {
case 0: /* 32 x 32 -> 32 */
tcg_gen_mul_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
if (op)
tcg_gen_sub_i32(tmp, tmp2, tmp);
else
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
break;
case 1: /* 16 x 16 -> 32 */
gen_mulxy(tmp, tmp2, op & 2, op & 1);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
break;
case 2: /* Dual multiply add. */
@@ -7854,12 +7838,12 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
break;
case 3: /* 32 * 16 -> 32msb */
@@ -7869,14 +7853,14 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_sxth(tmp2);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
break;
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
@@ -7893,17 +7877,17 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
break;
case 7: /* Unsigned sum of absolute differences. */
gen_helper_usad8(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
break;
}
@@ -7921,7 +7905,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_helper_udiv(tmp, tmp, tmp2);
else
gen_helper_sdiv(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((op & 0xe) == 0xc) {
/* Dual multiply accumulate long. */
@@ -7933,11 +7917,11 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
/* BUGFIX */
tmp64 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_addq(s, tmp64, rs, rd);
gen_storeq_reg(s, rs, rd, tmp64);
tcg_temp_free_i64(tmp64);
@@ -7949,10 +7933,10 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (op & 8) {
/* smlalxy */
gen_mulxy(tmp, tmp2, op & 2, op & 1);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
tmp64 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else {
/* Signed 64-bit multiply */
tmp64 = gen_muls_i64_i32(tmp, tmp2);
@@ -8032,7 +8016,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_msr(cpu_env, addr, tmp);
tcg_temp_free_i32(addr);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
gen_lookup_tb(s);
break;
}
@@ -8106,7 +8090,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_exception_return(s, tmp);
break;
case 6: /* mrs cpsr. */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
if (IS_M(env)) {
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_mrs(tmp, cpu_env, addr);
@@ -8158,7 +8142,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
imm = insn & 0x1f;
shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
if (rn == 15) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rn);
@@ -8185,7 +8169,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (imm != 32) {
tmp2 = load_reg(s, rd);
gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
break;
case 7:
@@ -8228,7 +8212,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_ori_i32(tmp, tmp, imm << 16);
} else {
/* movw */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, imm);
}
} else {
@@ -8239,7 +8223,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
offset -= imm;
else
offset += imm;
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, offset);
} else {
tmp = load_reg(s, rn);
@@ -8278,11 +8262,11 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
shifter_out = 1;
break;
}
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, imm);
rn = (insn >> 16) & 0xf;
if (rn == 15) {
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rn);
@@ -8291,12 +8275,12 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
shifter_out, tmp, tmp2))
goto illegal_op;
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
rd = (insn >> 8) & 0xf;
if (rd != 15) {
store_reg(s, rd, tmp);
} else {
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
}
}
}
@@ -8349,7 +8333,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
}
user = IS_USER(s);
if (rn == 15) {
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
/* PC relative. */
/* s->pc has already been incremented by 4. */
imm = s->pc & 0xfffffffc;
@@ -8375,7 +8359,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (shift)
tcg_gen_shli_i32(tmp, tmp, shift);
tcg_gen_add_i32(addr, addr, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
break;
case 4: /* Negative offset. */
tcg_gen_addi_i32(addr, addr, -imm);
@@ -8433,7 +8417,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
if (writeback) {
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
}
break;
@@ -8477,7 +8461,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rn);
if (insn & (1 << 10)) {
/* immediate */
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
} else {
/* reg */
@@ -8495,7 +8479,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
else
gen_helper_add_cc(tmp, tmp, tmp2);
}
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else {
/* shift immediate */
@@ -8513,27 +8497,27 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
op = (insn >> 11) & 3;
rd = (insn >> 8) & 0x7;
if (op == 0) { /* mov */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, insn & 0xff);
if (!s->condexec_mask)
gen_logic_CC(tmp);
store_reg(s, rd, tmp);
} else {
tmp = load_reg(s, rd);
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, insn & 0xff);
switch (op) {
case 1: /* cmp */
gen_helper_sub_cc(tmp, tmp, tmp2);
- dead_tmp(tmp);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
break;
case 2: /* add */
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
gen_helper_add_cc(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 3: /* sub */
@@ -8541,7 +8525,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
gen_helper_sub_cc(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
}
@@ -8553,10 +8537,10 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
/* load pc-relative. Bit 1 of PC is ignored. */
val = s->pc + 2 + ((insn & 0xff) * 4);
val &= ~(uint32_t)2;
- addr = new_tmp();
+ addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, val);
tmp = gen_ld32(addr, IS_USER(s));
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
store_reg(s, rd, tmp);
break;
}
@@ -8570,15 +8554,15 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rd);
tmp2 = load_reg(s, rm);
tcg_gen_add_i32(tmp, tmp, tmp2);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 1: /* cmp */
tmp = load_reg(s, rd);
tmp2 = load_reg(s, rm);
gen_helper_sub_cc(tmp, tmp, tmp2);
- dead_tmp(tmp2);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
break;
case 2: /* mov/cpy */
tmp = load_reg(s, rm);
@@ -8588,7 +8572,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rm);
if (insn & (1 << 7)) {
val = (uint32_t)s->pc | 1;
- tmp2 = new_tmp();
+ tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, val);
store_reg(s, 14, tmp2);
}
@@ -8613,7 +8597,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
}
if (op == 9) { /* neg */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
} else if (op != 0xf) { /* mvn doesn't read its first operand */
tmp = load_reg(s, rd);
@@ -8724,14 +8708,14 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
if (val) {
store_reg(s, rm, tmp2);
if (op != 0xf)
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
} else {
store_reg(s, rd, tmp);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp2);
}
} else {
- dead_tmp(tmp);
- dead_tmp(tmp2);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
}
break;
@@ -8744,7 +8728,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
addr = load_reg(s, rn);
tmp = load_reg(s, rm);
tcg_gen_add_i32(addr, addr, tmp);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
if (op < 3) /* store */
tmp = load_reg(s, rd);
@@ -8777,7 +8761,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
}
if (op >= 3) /* load */
store_reg(s, rd, tmp);
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
break;
case 6:
@@ -8797,7 +8781,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rd);
gen_st32(tmp, addr, IS_USER(s));
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
break;
case 7:
@@ -8817,7 +8801,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rd);
gen_st8(tmp, addr, IS_USER(s));
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
break;
case 8:
@@ -8837,7 +8821,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rd);
gen_st16(tmp, addr, IS_USER(s));
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
break;
case 9:
@@ -8856,7 +8840,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, rd);
gen_st32(tmp, addr, IS_USER(s));
}
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
break;
case 10:
@@ -8867,7 +8851,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tmp = load_reg(s, 13);
} else {
/* PC. bit 1 is ignored. */
- tmp = new_tmp();
+ tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
}
val = (insn & 0xff) * 4;
@@ -8964,7 +8948,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
else
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
- dead_tmp(tmp);
+ tcg_temp_free_i32(tmp);
offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
val = (uint32_t)s->pc + 2;
val += offset;
@@ -9057,7 +9041,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
if ((insn & (1 << rn)) == 0) {
store_reg(s, rn, addr);
} else {
- dead_tmp(addr);
+ tcg_temp_free_i32(addr);
}
break;
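
(Illustrative sketch, not part of the patch.) The converted sites in the hunks above all follow the same temp lifecycle: allocate an i32 temp with tcg_temp_new_i32(), emit the TCG ops that consume it, then release it with tcg_temp_free_i32() unless a helper such as store_reg() takes ownership and frees it. A minimal example of that pattern, assuming the load_reg()/store_reg() helpers already defined in this file; the gen_add_imm_example() wrapper itself is hypothetical and exists only to show the shape of the converted code:

static void gen_add_imm_example(DisasContext *s, int rd, int rn, uint32_t imm)
{
    TCGv tmp = load_reg(s, rn);        /* fresh temp holding register rn */
    TCGv tmp2 = tcg_temp_new_i32();    /* was new_tmp() before this patch */
    tcg_gen_movi_i32(tmp2, imm);       /* materialise the immediate */
    tcg_gen_add_i32(tmp, tmp, tmp2);   /* tmp = rn + imm */
    tcg_temp_free_i32(tmp2);           /* was dead_tmp() before this patch */
    store_reg(s, rd, tmp);             /* store_reg() frees tmp for us */
}

The remaining hunks below drop the num_temps bookkeeping from the translator loop, since TCG's own temp tracking makes the per-instruction leak counter redundant.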
@@ -9129,8 +9113,6 @@ static inline void gen_intermediate_code_internal(CPUState *env,
int max_insns;
/* generate intermediate code */
- num_temps = 0;
-
pc_start = tb->pc;
dc->tb = tb;
@@ -9202,7 +9184,7 @@ static inline void gen_intermediate_code_internal(CPUState *env,
complications trying to do it at the end of the block. */
if (dc->condexec_mask || dc->condexec_cond)
{
- TCGv tmp = new_tmp();
+ TCGv tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
store_cpu_field(tmp, condexec_bits);
}
@@ -9271,10 +9253,6 @@ static inline void gen_intermediate_code_internal(CPUState *env,
} else {
disas_arm_insn(env, dc);
}
- if (num_temps) {
- fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
- num_temps = 0;
- }
if (dc->condjmp && !dc->is_jmp) {
gen_set_label(dc->condlabel);