Diffstat (limited to 'target/arm/translate.c')
 target/arm/translate.c | 478
 1 file changed, 267 insertions(+), 211 deletions(-)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e52a6d7622..6946e56a3a 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -224,7 +224,7 @@ static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
* We choose to ignore [1:0] in ARM mode for all architecture versions.
*/
tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
- s->is_jmp = DISAS_JUMP;
+ s->base.is_jmp = DISAS_JUMP;
}
tcg_gen_mov_i32(cpu_R[reg], var);
tcg_temp_free_i32(var);
@@ -297,7 +297,7 @@ static void gen_step_complete_exception(DisasContext *s)
gen_ss_advance(s);
gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
default_exception_el(s));
- s->is_jmp = DISAS_EXC;
+ s->base.is_jmp = DISAS_NORETURN;
}
static void gen_singlestep_exception(DisasContext *s)
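For orientation before the renames that follow: this patch retires the ARM-local is_jmp codes in favour of the shared DisasJumpType used by the generic translator. A sketch of that enum (paraphrased from memory of exec/translator.h in this series, so treat the exact members as approximate):

    typedef enum DisasJumpType {
        DISAS_NEXT,      /* next instruction can be analyzed */
        DISAS_TOO_MANY,  /* the translation loop itself ended the TB */
        DISAS_NORETURN,  /* generated code never falls through; replaces
                            the old ARM-local DISAS_EXC and DISAS_TB_JUMP */
        DISAS_TARGET_0,  /* first target-defined code; ARM keeps
                            DISAS_JUMP, DISAS_UPDATE, DISAS_WFI, ... here */
    } DisasJumpType;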
@@ -321,7 +321,7 @@ static inline bool is_singlestepping(DisasContext *s)
* misnamed as it only means "one instruction per TB" and doesn't
* affect the code we generate.
*/
- return s->singlestep_enabled || s->ss_active;
+ return s->base.singlestep_enabled || s->ss_active;
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
@@ -930,7 +930,7 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
TCGv_i32 tmp;
- s->is_jmp = DISAS_JUMP;
+ s->base.is_jmp = DISAS_JUMP;
if (s->thumb != (addr & 1)) {
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, addr & 1);
@@ -943,7 +943,7 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
- s->is_jmp = DISAS_JUMP;
+ s->base.is_jmp = DISAS_JUMP;
tcg_gen_andi_i32(cpu_R[15], var, ~1);
tcg_gen_andi_i32(var, var, 1);
store_cpu_field(var, thumb);
@@ -957,11 +957,11 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
/* Generate the same code here as for a simple bx, but flag via
- * s->is_jmp that we need to do the rest of the work later.
+ * s->base.is_jmp that we need to do the rest of the work later.
*/
gen_bx(s, var);
if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
- s->is_jmp = DISAS_BX_EXCRET;
+ s->base.is_jmp = DISAS_BX_EXCRET;
}
}
@@ -1161,7 +1161,7 @@ static inline void gen_hvc(DisasContext *s, int imm16)
*/
s->svc_imm = imm16;
gen_set_pc_im(s, s->pc);
- s->is_jmp = DISAS_HVC;
+ s->base.is_jmp = DISAS_HVC;
}
static inline void gen_smc(DisasContext *s)
@@ -1176,7 +1176,7 @@ static inline void gen_smc(DisasContext *s)
gen_helper_pre_smc(cpu_env, tmp);
tcg_temp_free_i32(tmp);
gen_set_pc_im(s, s->pc);
- s->is_jmp = DISAS_SMC;
+ s->base.is_jmp = DISAS_SMC;
}
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
@@ -1184,7 +1184,7 @@ static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
gen_exception_internal(excp);
- s->is_jmp = DISAS_EXC;
+ s->base.is_jmp = DISAS_NORETURN;
}
static void gen_exception_insn(DisasContext *s, int offset, int excp,
@@ -1193,14 +1193,14 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
gen_exception(excp, syn, target_el);
- s->is_jmp = DISAS_EXC;
+ s->base.is_jmp = DISAS_NORETURN;
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
- s->is_jmp = DISAS_EXIT;
+ s->base.is_jmp = DISAS_EXIT;
}
static inline void gen_hlt(DisasContext *s, int imm)
@@ -4145,7 +4145,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
- return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
@@ -4169,12 +4169,12 @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(s, dest);
- tcg_gen_exit_tb((uintptr_t)s->tb + n);
+ tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
} else {
gen_set_pc_im(s, dest);
gen_goto_ptr();
}
- s->is_jmp = DISAS_TB_JUMP;
+ s->base.is_jmp = DISAS_NORETURN;
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
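For context on the goto_tb/exit_tb pairing above: tcg_gen_exit_tb((uintptr_t)tb + n) makes the generated code hand back the TB pointer with the taken jump-slot index n in its low bits, and the execution loop uses that to patch slot n so the two TBs chain directly next time. A minimal sketch of the consumer side (paraphrasing accel/tcg/cpu-exec.c of this era; next_tb stands for the TB found for the new PC, and locking/invalidation checks are omitted):

    uintptr_t ret = tcg_qemu_tb_exec(env, tb->tc_ptr);       /* run the TB */
    TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    int n = ret & TB_EXIT_MASK;            /* which goto_tb slot was taken */

    if (last_tb && n <= TB_EXIT_IDX1) {
        /* Patch last_tb's direct-jump slot n to target next_tb, so the
         * next execution bypasses the lookup.  The gen_goto_ptr() path
         * above exits via a hash lookup instead and is never patched. */
        tb_add_jump(last_tb, n, next_tb);
    }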
@@ -4436,7 +4436,7 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
tcg_temp_free_i32(tcg_tgtmode);
tcg_temp_free_i32(tcg_regno);
tcg_temp_free_i32(tcg_reg);
- s->is_jmp = DISAS_UPDATE;
+ s->base.is_jmp = DISAS_UPDATE;
}
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
@@ -4458,7 +4458,7 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
tcg_temp_free_i32(tcg_tgtmode);
tcg_temp_free_i32(tcg_regno);
store_reg(s, rn, tcg_reg);
- s->is_jmp = DISAS_UPDATE;
+ s->base.is_jmp = DISAS_UPDATE;
}
/* Store value to PC as for an exception return (ie don't
@@ -4482,7 +4482,7 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
gen_helper_cpsr_write_eret(cpu_env, cpsr);
tcg_temp_free_i32(cpsr);
/* Must exit loop to check un-masked IRQs */
- s->is_jmp = DISAS_EXIT;
+ s->base.is_jmp = DISAS_EXIT;
}
/* Generate an old-style exception return. Marks pc as dead. */
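The "must exit loop" comment above refers to the outer execution loop: DISAS_EXIT ends the TB with tcg_gen_exit_tb(0), returning control to cpu_exec so pending interrupts are re-examined before the next TB runs. Roughly (a simplified sketch of accel/tcg/cpu-exec.c; not part of this patch):

    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        /* cpu_handle_interrupt() is where a newly unmasked IRQ is taken;
         * ending a TB with DISAS_EXIT gets us back here promptly. */
        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
        }
    }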
@@ -4505,17 +4505,17 @@ static void gen_nop_hint(DisasContext *s, int val)
case 1: /* yield */
if (!parallel_cpus) {
gen_set_pc_im(s, s->pc);
- s->is_jmp = DISAS_YIELD;
+ s->base.is_jmp = DISAS_YIELD;
}
break;
case 3: /* wfi */
gen_set_pc_im(s, s->pc);
- s->is_jmp = DISAS_WFI;
+ s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
if (!parallel_cpus) {
gen_set_pc_im(s, s->pc);
- s->is_jmp = DISAS_WFE;
+ s->base.is_jmp = DISAS_WFE;
}
break;
case 4: /* sev */
@@ -7654,13 +7654,13 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
return 1;
}
gen_set_pc_im(s, s->pc);
- s->is_jmp = DISAS_WFI;
+ s->base.is_jmp = DISAS_WFI;
return 0;
default:
break;
}
- if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
@@ -7751,7 +7751,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
}
}
- if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
@@ -8065,7 +8065,7 @@ static void gen_srs(DisasContext *s,
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
- s->is_jmp = DISAS_UPDATE;
+ s->base.is_jmp = DISAS_UPDATE;
}
static void disas_arm_insn(DisasContext *s, unsigned int insn)
@@ -8153,7 +8153,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* setend */
if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
gen_helper_setend(cpu_env);
- s->is_jmp = DISAS_UPDATE;
+ s->base.is_jmp = DISAS_UPDATE;
}
return;
} else if ((insn & 0x0fffff00) == 0x057ff000) {
@@ -9527,7 +9527,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
gen_helper_cpsr_write_eret(cpu_env, tmp);
tcg_temp_free_i32(tmp);
/* Must exit loop to check un-masked IRQs */
- s->is_jmp = DISAS_EXIT;
+ s->base.is_jmp = DISAS_EXIT;
}
}
break;
@@ -9565,7 +9565,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* swi */
gen_set_pc_im(s, s->pc);
s->svc_imm = extract32(insn, 0, 24);
- s->is_jmp = DISAS_SWI;
+ s->base.is_jmp = DISAS_SWI;
break;
default:
illegal_op:
@@ -11657,7 +11657,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
ARCH(6);
if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
gen_helper_setend(cpu_env);
- s->is_jmp = DISAS_UPDATE;
+ s->base.is_jmp = DISAS_UPDATE;
}
break;
case 3:
@@ -11751,7 +11751,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
/* swi */
gen_set_pc_im(s, s->pc);
s->svc_imm = extract32(insn, 0, 8);
- s->is_jmp = DISAS_SWI;
+ s->base.is_jmp = DISAS_SWI;
break;
}
/* generate a conditional jump to next instruction */
@@ -11824,35 +11824,14 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
return false;
}
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+static int arm_tr_init_disas_context(DisasContextBase *dcbase,
+ CPUState *cs, int max_insns)
{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cs->env_ptr;
ARMCPU *cpu = arm_env_get_cpu(env);
- DisasContext dc1, *dc = &dc1;
- target_ulong pc_start;
- target_ulong next_page_start;
- int num_insns;
- int max_insns;
- bool end_of_page;
-
- /* generate intermediate code */
-
- /* The A64 decoder has its own top level loop, because it doesn't need
- * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
- */
- if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
- gen_intermediate_code_a64(cs, tb);
- return;
- }
- pc_start = tb->pc;
-
- dc->tb = tb;
-
- dc->is_jmp = DISAS_NEXT;
- dc->pc = pc_start;
- dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->pc = dc->base.pc_first;
dc->condjmp = 0;
dc->aarch64 = 0;
@@ -11861,23 +11840,23 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
*/
dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
!arm_el_is_aa64(env, 3);
- dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
- dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
- dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
- dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
- dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
- dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
+ dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
+ dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
+ dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
+ dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
+ dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
+ dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
#endif
- dc->ns = ARM_TBFLAG_NS(tb->flags);
- dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
- dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
- dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
- dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
- dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
- dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags);
+ dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
+ dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
+ dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
+ dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
+ dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
+ dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
+ dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
dc->cp_regs = cpu->cp_regs;
dc->features = env->features;
@@ -11896,11 +11875,26 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
* emit code to generate a software step exception
* end the TB
*/
- dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
- dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
+ dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
dc->is_ldex = false;
dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+ dc->next_page_start =
+ (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+ /* If architectural single step active, limit to 1. */
+ if (is_singlestepping(dc)) {
+ max_insns = 1;
+ }
+
+ /* ARM is a fixed-length ISA. Bound the number of insns to execute
+ to those left on the page. */
+ if (!dc->thumb) {
+ int bound = (dc->next_page_start - dc->base.pc_first) / 4;
+ max_insns = MIN(max_insns, bound);
+ }
+
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
cpu_F0d = tcg_temp_new_i64();
@@ -11909,19 +11903,13 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
cpu_V1 = cpu_F1d;
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
cpu_M0 = tcg_temp_new_i64();
- next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- num_insns = 0;
- max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }
- gen_tb_start(tb);
+ return max_insns;
+}
- tcg_clear_temp_count();
+static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
/* A note on handling of the condexec (IT) bits:
*
@@ -11955,141 +11943,173 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
/* Reset the conditional execution bits immediately. This avoids
complications trying to do it at the end of the block. */
- if (dc->condexec_mask || dc->condexec_cond)
- {
+ if (dc->condexec_mask || dc->condexec_cond) {
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
store_cpu_field(tmp, condexec_bits);
- }
- do {
- dc->insn_start_idx = tcg_op_buf_count();
- tcg_gen_insn_start(dc->pc,
- (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
- 0);
- num_insns++;
+ }
+ tcg_clear_temp_count();
+}
+
+static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ dc->insn_start_idx = tcg_op_buf_count();
+ tcg_gen_insn_start(dc->pc,
+ (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
+ 0);
+}
+static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
+ const CPUBreakpoint *bp)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (bp->flags & BP_CPU) {
+ gen_set_condexec(dc);
+ gen_set_pc_im(dc, dc->pc);
+ gen_helper_check_breakpoints(cpu_env);
+ /* End the TB early; it's likely not going to be executed */
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ } else {
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be
+ included in [tb->pc, tb->pc + tb->size) in order
+ for it to be properly cleared -- thus we
+ increment the PC here so that the logic setting
+ tb->size below does the right thing. */
+ /* TODO: Advance PC by correct instruction length to
+ * avoid disassembler error messages */
+ dc->pc += 2;
+ dc->base.is_jmp = DISAS_NORETURN;
+ }
+
+ return true;
+}
+
+static bool arm_pre_translate_insn(DisasContext *dc)
+{
#ifdef CONFIG_USER_ONLY
- /* Intercept jump to the magic kernel page. */
- if (dc->pc >= 0xffff0000) {
- /* We always get here via a jump, so know we are not in a
- conditional execution block. */
- gen_exception_internal(EXCP_KERNEL_TRAP);
- dc->is_jmp = DISAS_EXC;
- break;
- }
+ /* Intercept jump to the magic kernel page. */
+ if (dc->pc >= 0xffff0000) {
+ /* We always get here via a jump, so know we are not in a
+ conditional execution block. */
+ gen_exception_internal(EXCP_KERNEL_TRAP);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
#endif
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- CPUBreakpoint *bp;
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == dc->pc) {
- if (bp->flags & BP_CPU) {
- gen_set_condexec(dc);
- gen_set_pc_im(dc, dc->pc);
- gen_helper_check_breakpoints(cpu_env);
- /* End the TB early; it's likely not going to be executed */
- dc->is_jmp = DISAS_UPDATE;
- } else {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
- /* The address covered by the breakpoint must be
- included in [tb->pc, tb->pc + tb->size) in order
- to for it to be properly cleared -- thus we
- increment the PC here so that the logic setting
- tb->size below does the right thing. */
- /* TODO: Advance PC by correct instruction length to
- * avoid disassembler error messages */
- dc->pc += 2;
- goto done_generating;
- }
- break;
- }
- }
- }
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(dc->base.num_insns == 1);
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
+ default_exception_el(dc));
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
- if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
- gen_io_start();
- }
+ return false;
+}
- if (dc->ss_active && !dc->pstate_ss) {
- /* Singlestep state is Active-pending.
- * If we're in this state at the start of a TB then either
- * a) we just took an exception to an EL which is being debugged
- * and this is the first insn in the exception handler
- * b) debug exceptions were masked and we just unmasked them
- * without changing EL (eg by clearing PSTATE.D)
- * In either case we're going to take a swstep exception in the
- * "did not step an insn" case, and so the syndrome ISV and EX
- * bits should be zero.
- */
- assert(num_insns == 1);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
- default_exception_el(dc));
- goto done_generating;
- }
+static void arm_post_translate_insn(DisasContext *dc)
+{
+ if (dc->condjmp && !dc->base.is_jmp) {
+ gen_set_label(dc->condlabel);
+ dc->condjmp = 0;
+ }
+ dc->base.pc_next = dc->pc;
+ translator_loop_temp_check(&dc->base);
+}
- if (dc->thumb) {
- disas_thumb_insn(env, dc);
- if (dc->condexec_mask) {
- dc->condexec_cond = (dc->condexec_cond & 0xe)
- | ((dc->condexec_mask >> 4) & 1);
- dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
- if (dc->condexec_mask == 0) {
- dc->condexec_cond = 0;
- }
- }
- } else {
- unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
- dc->pc += 4;
- disas_arm_insn(dc, insn);
- }
+static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cpu->env_ptr;
+ unsigned int insn;
- if (dc->condjmp && !dc->is_jmp) {
- gen_set_label(dc->condlabel);
- dc->condjmp = 0;
- }
+ if (arm_pre_translate_insn(dc)) {
+ return;
+ }
- if (tcg_check_temp_count()) {
- fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
- dc->pc);
- }
+ insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->pc += 4;
+ disas_arm_insn(dc, insn);
- /* Translation stops when a conditional branch is encountered.
- * Otherwise the subsequent code could get translated several times.
- * Also stop translation when a page boundary is reached. This
- * ensures prefetch aborts occur at the right place. */
+ arm_post_translate_insn(dc);
- /* We want to stop the TB if the next insn starts in a new page,
- * or if it spans between this page and the next. This means that
- * if we're looking at the last halfword in the page we need to
- * see if it's a 16-bit Thumb insn (which will fit in this TB)
- * or a 32-bit Thumb insn (which won't).
- * This is to avoid generating a silly TB with a single 16-bit insn
- * in it at the end of this page (which would execute correctly
- * but isn't very efficient).
- */
- end_of_page = (dc->pc >= next_page_start) ||
- ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
+ /* ARM is a fixed-length ISA. We performed the cross-page check
+ in init_disas_context by adjusting max_insns. */
+}
- } while (!dc->is_jmp && !tcg_op_buf_full() &&
- !is_singlestepping(dc) &&
- !singlestep &&
- !end_of_page &&
- num_insns < max_insns);
+static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cpu->env_ptr;
- if (tb->cflags & CF_LAST_IO) {
- if (dc->condjmp) {
- /* FIXME: This can theoretically happen with self-modifying
- code. */
- cpu_abort(cs, "IO on conditional branch instruction");
+ if (arm_pre_translate_insn(dc)) {
+ return;
+ }
+
+ disas_thumb_insn(env, dc);
+
+ /* Advance the Thumb condexec condition. */
+ if (dc->condexec_mask) {
+ dc->condexec_cond = ((dc->condexec_cond & 0xe) |
+ ((dc->condexec_mask >> 4) & 1));
+ dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
+ if (dc->condexec_mask == 0) {
+ dc->condexec_cond = 0;
}
- gen_io_end();
+ }
+
+ arm_post_translate_insn(dc);
+
+ /* Thumb is a variable-length ISA. Stop translation when the next insn
+ * will touch a new page. This ensures that prefetch aborts occur at
+ * the right place.
+ *
+ * We want to stop the TB if the next insn starts in a new page,
+ * or if it spans between this page and the next. This means that
+ * if we're looking at the last halfword in the page we need to
+ * see if it's a 16-bit Thumb insn (which will fit in this TB)
+ * or a 32-bit Thumb insn (which won't).
+ * This is to avoid generating a silly TB with a single 16-bit insn
+ * in it at the end of this page (which would execute correctly
+ * but isn't very efficient).
+ */
+ if (dc->base.is_jmp == DISAS_NEXT
+ && (dc->pc >= dc->next_page_start
+ || (dc->pc >= dc->next_page_start - 3
+ && insn_crosses_page(env, dc)))) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying code. */
+ cpu_abort(cpu, "IO on conditional branch instruction");
}
/* At this stage dc->condjmp will only be set when the skipped
instruction was a conditional branch or trap, and the PC has
already been written. */
gen_set_condexec(dc);
- if (dc->is_jmp == DISAS_BX_EXCRET) {
+ if (dc->base.is_jmp == DISAS_BX_EXCRET) {
/* Exception return branches need some special case code at the
* end of the TB, which is complex enough that it has to
* handle the single-step vs not and the condition-failed
@@ -12098,7 +12118,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
gen_bx_excret_final_code(dc);
} else if (unlikely(is_singlestepping(dc))) {
/* Unconditional and "condition passed" instruction codepath. */
- switch (dc->is_jmp) {
+ switch (dc->base.is_jmp) {
case DISAS_SWI:
gen_ss_advance(dc);
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
@@ -12113,12 +12133,16 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
break;
case DISAS_NEXT:
+ case DISAS_TOO_MANY:
case DISAS_UPDATE:
gen_set_pc_im(dc, dc->pc);
/* fall through */
default:
/* FIXME: Single stepping a WFI insn will not halt the CPU. */
gen_singlestep_exception(dc);
+ break;
+ case DISAS_NORETURN:
+ break;
}
} else {
/* While branches must always occur at the end of an IT block,
@@ -12129,8 +12153,9 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
- Hardware watchpoints.
Hardware breakpoints have already been handled and skip this code.
*/
- switch(dc->is_jmp) {
+ switch (dc->base.is_jmp) {
case DISAS_NEXT:
+ case DISAS_TOO_MANY:
gen_goto_tb(dc, 1, dc->pc);
break;
case DISAS_JUMP:
@@ -12143,8 +12168,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
/* indicate that the hash table must be used to find the next TB */
tcg_gen_exit_tb(0);
break;
- case DISAS_TB_JUMP:
- case DISAS_EXC:
+ case DISAS_NORETURN:
/* nothing more to generate */
break;
case DISAS_WFI:
@@ -12185,23 +12209,55 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
}
}
-done_generating:
- gen_tb_end(tb, num_insns);
+ /* Functions above can change dc->pc, so re-align dc->base.pc_next. */
+ dc->base.pc_next = dc->pc;
+}
+
+static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
-#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
- qemu_log_in_addr_range(pc_start)) {
- qemu_log_lock();
- qemu_log("----------------\n");
- qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(cs, pc_start, dc->pc - pc_start,
- dc->thumb | (dc->sctlr_b << 1));
- qemu_log("\n");
- qemu_log_unlock();
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+ log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size,
+ dc->thumb | (dc->sctlr_b << 1));
+}
+
+static const TranslatorOps arm_translator_ops = {
+ .init_disas_context = arm_tr_init_disas_context,
+ .tb_start = arm_tr_tb_start,
+ .insn_start = arm_tr_insn_start,
+ .breakpoint_check = arm_tr_breakpoint_check,
+ .translate_insn = arm_tr_translate_insn,
+ .tb_stop = arm_tr_tb_stop,
+ .disas_log = arm_tr_disas_log,
+};
+
+static const TranslatorOps thumb_translator_ops = {
+ .init_disas_context = arm_tr_init_disas_context,
+ .tb_start = arm_tr_tb_start,
+ .insn_start = arm_tr_insn_start,
+ .breakpoint_check = arm_tr_breakpoint_check,
+ .translate_insn = thumb_tr_translate_insn,
+ .tb_stop = arm_tr_tb_stop,
+ .disas_log = arm_tr_disas_log,
+};
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
+{
+ DisasContext dc;
+ const TranslatorOps *ops = &arm_translator_ops;
+
+ if (ARM_TBFLAG_THUMB(tb->flags)) {
+ ops = &thumb_translator_ops;
+ }
+#ifdef TARGET_AARCH64
+ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+ ops = &aarch64_translator_ops;
}
#endif
- tb->size = dc->pc - pc_start;
- tb->icount = num_insns;
+
+ translator_loop(ops, &dc.base, cpu, tb);
}
static const char *cpu_mode_names[16] = {
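The TranslatorOps tables registered above plug into the generic translator_loop(), which replaces the open-coded loop this patch deletes. Its shape, paraphrased (a sketch of accel/tcg/translator.c as of this series; icount handling, breakpoint iteration and singlestep details are simplified):

    void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
                         CPUState *cpu, TranslationBlock *tb)
    {
        int max_insns;

        /* Initialize the common base; targets layer their own fields on top. */
        db->tb = tb;
        db->pc_first = tb->pc;
        db->pc_next = db->pc_first;
        db->is_jmp = DISAS_NEXT;
        db->num_insns = 0;
        db->singlestep_enabled = cpu->singlestep_enabled;

        /* Same bounding the deleted open-coded loop did by hand. */
        max_insns = tb->cflags & CF_COUNT_MASK;
        if (max_insns == 0) {
            max_insns = CF_COUNT_MASK;
        }
        if (max_insns > TCG_MAX_INSNS) {
            max_insns = TCG_MAX_INSNS;
        }
        max_insns = ops->init_disas_context(db, cpu, max_insns);

        gen_tb_start(db->tb);
        ops->tb_start(db, cpu);

        while (true) {
            db->num_insns++;
            ops->insn_start(db, cpu);

            /* If a breakpoint matches db->pc_next, ops->breakpoint_check()
             * runs here and may end the TB (cf. arm_tr_breakpoint_check). */

            ops->translate_insn(db, cpu);   /* also advances db->pc_next */

            if (db->is_jmp != DISAS_NEXT) {
                break;
            }
            if (db->num_insns >= max_insns || tcg_op_buf_full()) {
                db->is_jmp = DISAS_TOO_MANY;   /* loop-imposed TB end */
                break;
            }
        }

        ops->tb_stop(db, cpu);
        gen_tb_end(db->tb, db->num_insns);

        tb->size = db->pc_next - db->pc_first;   /* was dc->pc - pc_start */
        tb->icount = db->num_insns;

        /* ops->disas_log() then runs under -d in_asm (CPU_LOG_TB_IN_ASM). */
    }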