Diffstat (limited to 'target')
-rw-r--r--   target/arm/cpu.c        |  43
-rw-r--r--   target/arm/cpu.h        |  10
-rw-r--r--   target/arm/helper.c     |  19
-rw-r--r--   target/arm/internals.h  |  21
-rw-r--r--   target/arm/kvm64.c      |   4
-rw-r--r--   target/arm/op_helper.c  |  23
-rw-r--r--   target/arm/translate.c  | 181
-rw-r--r--   target/arm/translate.h  |   5
8 files changed, 178 insertions, 128 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 04b062cb7e..b357aee778 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -304,33 +304,6 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 }
 
 #if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
-static void arm_v7m_unassigned_access(CPUState *cpu, hwaddr addr,
-                                      bool is_write, bool is_exec, int opaque,
-                                      unsigned size)
-{
-    ARMCPU *arm = ARM_CPU(cpu);
-    CPUARMState *env = &arm->env;
-
-    /* ARMv7-M interrupt return works by loading a magic value into the PC.
-     * On real hardware the load causes the return to occur. The qemu
-     * implementation performs the jump normally, then does the exception
-     * return by throwing a special exception when when the CPU tries to
-     * execute code at the magic address.
-     */
-    if (env->v7m.exception != 0 && addr >= 0xfffffff0 && is_exec) {
-        cpu->exception_index = EXCP_EXCEPTION_EXIT;
-        cpu_loop_exit(cpu);
-    }
-
-    /* In real hardware an attempt to access parts of the address space
-     * with nothing there will usually cause an external abort.
-     * However our QEMU board models are often missing device models where
-     * the guest can boot anyway with the default read-as-zero/writes-ignored
-     * behaviour that you get without a QEMU unassigned_access hook.
-     * So just return here to retain that default behaviour.
-     */
-}
-
 static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     CPUClass *cc = CPU_GET_CLASS(cs);
@@ -338,17 +311,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUARMState *env = &cpu->env;
     bool ret = false;
 
-    /* ARMv7-M interrupt return works by loading a magic value
-     * into the PC. On real hardware the load causes the
-     * return to occur. The qemu implementation performs the
-     * jump normally, then does the exception return when the
-     * CPU tries to execute code at the magic address.
-     * This will cause the magic PC value to be pushed to
-     * the stack if an interrupt occurred at the wrong time.
-     * We avoid this by disabling interrupts when
-     * pc contains a magic address.
-     *
-     * ARMv7-M interrupt masking works differently than -A or -R.
+    /* ARMv7-M interrupt masking works differently than -A or -R.
      * There is no FIQ/IRQ distinction. Instead of I and F bits
      * masking FIQ and IRQ interrupts, an exception is taken only
      * if it is higher priority than the current execution priority
@@ -356,8 +319,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
      * currently active exception).
      */
     if (interrupt_request & CPU_INTERRUPT_HARD
-        && (armv7m_nvic_can_take_pending_exception(env->nvic))
-        && (env->regs[15] < 0xfffffff0)) {
+        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
         cs->exception_index = EXCP_IRQ;
         cc->do_interrupt(cs);
         ret = true;
@@ -1091,7 +1053,6 @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
     cc->do_interrupt = arm_v7m_cpu_do_interrupt;
 #endif
 
-    cc->do_unassigned_access = arm_v7m_unassigned_access;
     cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
 }
 
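The cpu.c hunks above retire the old detection scheme, where an M-profile exception return was only recognised once the CPU tried to execute from the magic addresses at the top of the address space (>= 0xfffffff0), and where arm_v7m_cpu_exec_interrupt() had to hold off interrupts whenever PC sat in that range. Below is a minimal, self-contained sketch of that old guard condition for comparison with the new translate-time handling; the helper name old_style_is_exc_return and the sample EXC_RETURN value are illustrative, not part of the patch.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the rule removed above: an exception return was only
 * recognised when execution reached the magic range at the top of
 * the 32-bit address space while an exception was active.
 */
static bool old_style_is_exc_return(uint32_t pc, bool in_handler_mode)
{
    return in_handler_mode && pc >= 0xfffffff0u;
}

int main(void)
{
    /* 0xfffffff9 is the EXC_RETURN value for "return to Thread mode,
     * main stack"; any value in the magic range behaves the same here.
     */
    uint32_t lr_magic = 0xfffffff9u;

    printf("0x%08" PRIx32 " in Handler mode -> %s\n", lr_magic,
           old_style_is_exc_return(lr_magic, true) ?
           "treated as exception return" : "ordinary branch");
    printf("0x%08" PRIx32 " in Thread mode  -> %s\n", lr_magic,
           old_style_is_exc_return(lr_magic, false) ?
           "treated as exception return" : "ordinary branch");
    return 0;
}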
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a8aabce7dd..1055bfef3d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -58,6 +58,7 @@
 #define EXCP_SEMIHOST 16 /* semihosting call */
 #define EXCP_NOCP 17 /* v7M NOCP UsageFault */
 #define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
+/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET 1
 #define ARMV7M_EXCP_NMI 2
@@ -2290,6 +2291,9 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
 #define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
 #define ARM_TBFLAG_BE_DATA_SHIFT 20
 #define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
+/* For M profile only, Handler (ie not Thread) mode */
+#define ARM_TBFLAG_HANDLER_SHIFT 21
+#define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT)
 
 /* Bit usage when in AArch64 state */
 #define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */
@@ -2326,6 +2330,8 @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
     (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
 #define ARM_TBFLAG_BE_DATA(F) \
     (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
+#define ARM_TBFLAG_HANDLER(F) \
+    (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
 #define ARM_TBFLAG_TBI0(F) \
     (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
 #define ARM_TBFLAG_TBI1(F) \
@@ -2516,6 +2522,10 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     }
     *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
 
+    if (env->v7m.exception != 0) {
+        *flags |= ARM_TBFLAG_HANDLER_MASK;
+    }
+
     *cs_base = 0;
 }
 
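The cpu.h hunks reserve bit 21 of the TB flags word for "M-profile Handler mode" and give it the usual SHIFT/MASK/extract macro trio, with cpu_get_tb_cpu_state() setting the bit whenever env->v7m.exception is non-zero. A minimal standalone sketch of how such a single-bit flag round-trips through a 32-bit flags word, mirroring that macro pattern; make_tb_flags() here is a hypothetical stand-in, not QEMU's function.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same pattern as the ARM_TBFLAG_HANDLER_* macros added above:
 * one bit at position 21 of a 32-bit TB flags word.
 */
#define TBFLAG_HANDLER_SHIFT 21
#define TBFLAG_HANDLER_MASK  (1u << TBFLAG_HANDLER_SHIFT)
#define TBFLAG_HANDLER(f)    (((f) & TBFLAG_HANDLER_MASK) >> TBFLAG_HANDLER_SHIFT)

/* Hypothetical stand-in for cpu_get_tb_cpu_state(): set the bit when a
 * (non-zero) exception is currently being handled.
 */
static uint32_t make_tb_flags(uint32_t v7m_exception)
{
    uint32_t flags = 0;

    if (v7m_exception != 0) {
        flags |= TBFLAG_HANDLER_MASK;
    }
    return flags;
}

int main(void)
{
    uint32_t thread = make_tb_flags(0);  /* Thread mode: no active exception */
    uint32_t handler = make_tb_flags(3); /* Handler mode: e.g. HardFault active */

    assert(TBFLAG_HANDLER(thread) == 0);
    assert(TBFLAG_HANDLER(handler) == 1);
    printf("thread flags 0x%08" PRIx32 ", handler flags 0x%08" PRIx32 "\n",
           thread, handler);
    return 0;
}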
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 8cb7a9451c..8a3e4480aa 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6271,6 +6271,25 @@ static void arm_log_exception(int idx)
 {
     if (qemu_loglevel_mask(CPU_LOG_INT)) {
         const char *exc = NULL;
+        static const char * const excnames[] = {
+            [EXCP_UDEF] = "Undefined Instruction",
+            [EXCP_SWI] = "SVC",
+            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+            [EXCP_DATA_ABORT] = "Data Abort",
+            [EXCP_IRQ] = "IRQ",
+            [EXCP_FIQ] = "FIQ",
+            [EXCP_BKPT] = "Breakpoint",
+            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+            [EXCP_HVC] = "Hypervisor Call",
+            [EXCP_HYP_TRAP] = "Hypervisor Trap",
+            [EXCP_SMC] = "Secure Monitor Call",
+            [EXCP_VIRQ] = "Virtual IRQ",
+            [EXCP_VFIQ] = "Virtual FIQ",
+            [EXCP_SEMIHOST] = "Semihosting call",
+            [EXCP_NOCP] = "v7M NOCP UsageFault",
+            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
+        };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
             exc = excnames[idx];
diff --git a/target/arm/internals.h b/target/arm/internals.h
index f742a419ff..1f6efef7c4 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -51,27 +51,6 @@ static inline bool excp_is_internal(int excp)
         || excp == EXCP_SEMIHOST;
 }
 
-/* Exception names for debug logging; note that not all of these
- * precisely correspond to architectural exceptions.
- */
-static const char * const excnames[] = {
-    [EXCP_UDEF] = "Undefined Instruction",
-    [EXCP_SWI] = "SVC",
-    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
-    [EXCP_DATA_ABORT] = "Data Abort",
-    [EXCP_IRQ] = "IRQ",
-    [EXCP_FIQ] = "FIQ",
-    [EXCP_BKPT] = "Breakpoint",
-    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
-    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
-    [EXCP_HVC] = "Hypervisor Call",
-    [EXCP_HYP_TRAP] = "Hypervisor Trap",
-    [EXCP_SMC] = "Secure Monitor Call",
-    [EXCP_VIRQ] = "Virtual IRQ",
-    [EXCP_VFIQ] = "Virtual FIQ",
-    [EXCP_SEMIHOST] = "Semihosting call",
-};
-
 /* Scale factor for generic timers, ie number of ns per tick.
  * This gives a 62.5MHz timer.
  */
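Moving excnames[] out of internals.h and into arm_log_exception() gives the table a single owner and lets it gain the new v7M entries; the lookup stays bounds-checked and falls back to "unknown" for indexes that are out of range or simply not named (hence the reminder comment added next to the EXCP_ defines in cpu.h). A minimal standalone sketch of that lookup-with-fallback pattern, using a deliberately shortened table rather than QEMU's full list:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Designated initializers leave unnamed slots as NULL, so the lookup
 * must cope with both out-of-range indexes and holes in the table.
 */
static const char *exception_name(int idx)
{
    static const char * const excnames[] = {
        [1] = "Undefined Instruction",
        [2] = "SVC",
        [5] = "IRQ",
        [6] = "FIQ",
    };
    const char *exc = NULL;

    if (idx >= 0 && idx < (int)ARRAY_SIZE(excnames)) {
        exc = excnames[idx];
    }
    return exc ? exc : "unknown";
}

int main(void)
{
    printf("%d -> %s\n", 2, exception_name(2));
    printf("%d -> %s\n", 3, exception_name(3));   /* hole in the table */
    printf("%d -> %s\n", 99, exception_name(99)); /* out of range */
    return 0;
}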
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 61111091ad..a16abc8d12 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -940,7 +940,7 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
          * single step at this point so something has gone wrong.
          */
         error_report("%s: guest single-step while debugging unsupported"
-                     " (%"PRIx64", %"PRIx32")\n",
+                     " (%"PRIx64", %"PRIx32")",
                      __func__, env->pc, debug_exit->hsr);
         return false;
     }
@@ -965,7 +965,7 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
         break;
     }
     default:
-        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")\n",
+        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                      __func__, debug_exit->hsr, env->pc);
     }
 
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index d64c8670fa..156b825040 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -130,7 +130,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
     if (unlikely(ret)) {
         ARMCPU *cpu = ARM_CPU(cs);
         CPUARMState *env = &cpu->env;
-        uint32_t syn, exc;
+        uint32_t syn, exc, fsc;
         unsigned int target_el;
         bool same_el;
 
@@ -145,19 +145,32 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
             env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
         }
         same_el = arm_current_el(env) == target_el;
-        /* AArch64 syndrome does not have an LPAE bit */
-        syn = fsr & ~(1 << 9);
+
+        if (fsr & (1 << 9)) {
+            /* LPAE format fault status register : bottom 6 bits are
+             * status code in the same form as needed for syndrome
+             */
+            fsc = extract32(fsr, 0, 6);
+        } else {
+            /* Short format FSR : this fault will never actually be reported
+             * to an EL that uses a syndrome register. Check that here,
+             * and use a (currently) reserved FSR code in case the constructed
+             * syndrome does leak into the guest somehow.
+             */
+            assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
+            fsc = 0x3f;
+        }
 
         /* For insn and data aborts we assume there is no instruction syndrome
          * information; this is always true for exceptions reported to EL1.
          */
         if (access_type == MMU_INST_FETCH) {
-            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
+            syn = syn_insn_abort(same_el, 0, fi.s1ptw, fsc);
             exc = EXCP_PREFETCH_ABORT;
         } else {
             syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                        same_el, fi.s1ptw,
-                                       access_type == MMU_DATA_STORE, syn);
+                                       access_type == MMU_DATA_STORE, fsc);
             if (access_type == MMU_DATA_STORE
                 && arm_feature(env, ARM_FEATURE_V6)) {
                 fsr |= (1 << 11);
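The op_helper.c change stops reusing the raw FSR (with the LPAE bit masked off) as the syndrome status code and instead derives an explicit fault status code: an LPAE-format FSR already carries the 6-bit code in its low bits, while a short-format FSR should never reach an EL that uses a syndrome register, so a reserved code is used as a backstop. A minimal standalone sketch of that selection; extract32() here is a simplified local stand-in for the helper in QEMU's bitops.h, and the sample FSR values are illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's extract32(): take 'length' bits of 'value'
 * starting at bit 'start' (valid for length 1..32).
 */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

/* Mirror of the fsr -> fsc selection above: bit 9 marks an LPAE-format
 * FSR whose low 6 bits are already a syndrome-style status code;
 * otherwise fall back to the reserved code 0x3f.
 */
static uint32_t fsr_to_fsc(uint32_t fsr)
{
    if (fsr & (1u << 9)) {
        return extract32(fsr, 0, 6);
    }
    return 0x3f;
}

int main(void)
{
    uint32_t lpae_fsr = (1u << 9) | 0x07; /* e.g. level-3 translation fault */
    uint32_t short_fsr = 0x05;            /* short-descriptor format code */

    printf("LPAE  FSR 0x%03" PRIx32 " -> fsc 0x%02" PRIx32 "\n",
           lpae_fsr, fsr_to_fsc(lpae_fsr));
    printf("short FSR 0x%03" PRIx32 " -> fsc 0x%02" PRIx32 "\n",
           short_fsr, fsr_to_fsc(short_fsr));
    return 0;
}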
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e32e38cadd..0b5a0bca06 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -296,6 +296,30 @@ static void gen_step_complete_exception(DisasContext *s)
     s->is_jmp = DISAS_EXC;
 }
 
+static void gen_singlestep_exception(DisasContext *s)
+{
+    /* Generate the right kind of exception for singlestep, which is
+     * either the architectural singlestep or EXCP_DEBUG for QEMU's
+     * gdb singlestepping.
+     */
+    if (s->ss_active) {
+        gen_step_complete_exception(s);
+    } else {
+        gen_exception_internal(EXCP_DEBUG);
+    }
+}
+
+static inline bool is_singlestepping(DisasContext *s)
+{
+    /* Return true if we are singlestepping either because of
+     * architectural singlestep or QEMU gdbstub singlestep. This does
+     * not include the command line '-singlestep' mode which is rather
+     * misnamed as it only means "one instruction per TB" and doesn't
+     * affect the code we generate.
+     */
+    return s->singlestep_enabled || s->ss_active;
+}
+
 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
 {
     TCGv_i32 tmp1 = tcg_temp_new_i32();
@@ -880,6 +904,21 @@ static const uint8_t table_logic_cc[16] = {
     1, /* mvn */
 };
 
+static inline void gen_set_condexec(DisasContext *s)
+{
+    if (s->condexec_mask) {
+        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+        TCGv_i32 tmp = tcg_temp_new_i32();
+        tcg_gen_movi_i32(tmp, val);
+        store_cpu_field(tmp, condexec_bits);
+    }
+}
+
+static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
+{
+    tcg_gen_movi_i32(cpu_R[15], val);
+}
+
 /* Set PC and Thumb state from an immediate address.  */
 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
 {
@@ -904,6 +943,51 @@ static inline void gen_bx(DisasContext *s, TCGv_i32 var)
     store_cpu_field(var, thumb);
 }
 
+/* Set PC and Thumb state from var. var is marked as dead.
+ * For M-profile CPUs, include logic to detect exception-return
+ * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
+ * and BX reg, and no others, and happens only for code in Handler mode.
+ */
+static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
+{
+    /* Generate the same code here as for a simple bx, but flag via
+     * s->is_jmp that we need to do the rest of the work later.
+     */
+    gen_bx(s, var);
+    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
+        s->is_jmp = DISAS_BX_EXCRET;
+    }
+}
+
+static inline void gen_bx_excret_final_code(DisasContext *s)
+{
+    /* Generate the code to finish possible exception return and end the TB */
+    TCGLabel *excret_label = gen_new_label();
+
+    /* Is the new PC value in the magic range indicating exception return? */
+    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
+    /* No: end the TB as we would for a DISAS_JMP */
+    if (is_singlestepping(s)) {
+        gen_singlestep_exception(s);
+    } else {
+        tcg_gen_exit_tb(0);
+    }
+    gen_set_label(excret_label);
+    /* Yes: this is an exception return.
+     * At this point in runtime env->regs[15] and env->thumb will hold
+     * the exception-return magic number, which do_v7m_exception_exit()
+     * will read. Nothing else will be able to see those values because
+     * the cpu-exec main loop guarantees that we will always go straight
+     * from raising the exception to the exception-handling code.
+     *
+     * gen_ss_advance(s) does nothing on M profile currently but
+     * calling it is conceptually the right thing as we have executed
+     * this instruction (compare SWI, HVC, SMC handling).
+     */
+    gen_ss_advance(s);
+    gen_exception_internal(EXCP_EXCEPTION_EXIT);
+}
+
 /* Variant of store_reg which uses branch&exchange logic when storing
    to r15 in ARM architecture v7 and above. The source must be a
    temporary and will be marked as dead. */
@@ -923,7 +1007,7 @@ static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
 static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 {
     if (reg == 15 && ENABLE_ARCH_5) {
-        gen_bx(s, var);
+        gen_bx_excret(s, var);
     } else {
         store_reg(s, reg, var);
     }
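gen_bx_excret() only tags a candidate branch (and only when translating Handler-mode code); the real decision is deferred to gen_bx_excret_final_code(), which emits a run-time TCG_COND_GEU comparison of the new PC against 0xff000000 and either ends the TB normally or raises the internal EXCP_EXCEPTION_EXIT. A minimal standalone sketch of the decision the generated code makes at run time; the enum values and function name are illustrative, not QEMU's.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative outcomes of ending a TB after a BX-style branch taken
 * in Handler mode; these are not QEMU's real DISAS_ or EXCP_ values.
 */
enum tb_end {
    END_NORMAL_EXIT,      /* tcg_gen_exit_tb(0) path */
    END_SINGLESTEP_EXC,   /* gen_singlestep_exception() path */
    END_EXCEPTION_EXIT,   /* EXCP_EXCEPTION_EXIT raised */
};

static enum tb_end end_tb_after_excret_branch(uint32_t new_pc, bool singlestepping)
{
    if (new_pc < 0xff000000u) {
        /* Not in the magic range: behave like an ordinary indirect branch */
        return singlestepping ? END_SINGLESTEP_EXC : END_NORMAL_EXIT;
    }
    /* Magic range: let the exception-exit code inspect the value */
    return END_EXCEPTION_EXIT;
}

int main(void)
{
    static const char * const names[] = {
        "normal exit", "singlestep exception", "exception return",
    };
    uint32_t targets[] = { 0x00001234u, 0xfffffff1u, 0xfffffffdu };

    for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
        printf("branch to 0x%08" PRIx32 " -> %s\n", targets[i],
               names[end_tb_after_excret_branch(targets[i], false)]);
    }
    return 0;
}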
@@ -1056,11 +1140,6 @@ DO_GEN_ST(8, MO_UB)
 DO_GEN_ST(16, MO_UW)
 DO_GEN_ST(32, MO_UL)
 
-static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
-{
-    tcg_gen_movi_i32(cpu_R[15], val);
-}
-
 static inline void gen_hvc(DisasContext *s, int imm16)
 {
     /* The pre HVC helper handles cases when HVC gets trapped
@@ -1094,17 +1173,6 @@ static inline void gen_smc(DisasContext *s)
     s->is_jmp = DISAS_SMC;
 }
 
-static inline void
-gen_set_condexec (DisasContext *s)
-{
-    if (s->condexec_mask) {
-        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
-        TCGv_i32 tmp = tcg_temp_new_i32();
-        tcg_gen_movi_i32(tmp, val);
-        store_cpu_field(tmp, condexec_bits);
-    }
-}
-
 static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
 {
     gen_set_condexec(s);
@@ -4092,7 +4160,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
 
 static inline void gen_jmp (DisasContext *s, uint32_t dest)
 {
-    if (unlikely(s->singlestep_enabled || s->ss_active)) {
+    if (unlikely(is_singlestepping(s))) {
         /* An indirect jump so that we still trigger the debug exception.  */
         if (s->thumb)
             dest |= 1;
@@ -9858,7 +9926,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 tmp = tcg_temp_new_i32();
                 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                 if (i == 15) {
-                    gen_bx(s, tmp);
+                    gen_bx_excret(s, tmp);
                 } else if (i == rn) {
                     loaded_var = tmp;
                     loaded_base = 1;
@@ -9959,7 +10027,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
             gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
             if (logic_cc)
                 gen_logic_CC(tmp);
-            store_reg_bx(s, rd, tmp);
+            store_reg(s, rd, tmp);
             break;
         case 1: /* Sign/zero extend.  */
             op = (insn >> 20) & 7;
@@ -10485,7 +10553,12 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 }
                 break;
             case 4: /* bxj */
-                /* Trivial implementation equivalent to bx. */
+                /* Trivial implementation equivalent to bx.
+                 * This instruction doesn't exist at all for M-profile.
+                 */
+                if (arm_dc_feature(s, ARM_FEATURE_M)) {
+                    goto illegal_op;
+                }
                 tmp = load_reg(s, rn);
                 gen_bx(s, tmp);
                 break;
@@ -10885,7 +10958,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 goto illegal_op;
             }
             if (rs == 15) {
-                gen_bx(s, tmp);
+                gen_bx_excret(s, tmp);
             } else {
                 store_reg(s, rs, tmp);
             }
@@ -11075,9 +11148,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
                 tmp2 = tcg_temp_new_i32();
                 tcg_gen_movi_i32(tmp2, val);
                 store_reg(s, 14, tmp2);
+                gen_bx(s, tmp);
+            } else {
+                /* Only BX works as exception-return, not BLX */
+                gen_bx_excret(s, tmp);
             }
-            /* already thumb, no need to check */
-            gen_bx(s, tmp);
             break;
         }
         break;
@@ -11752,6 +11827,7 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
     dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
     dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
+    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags);
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
 
@@ -11851,14 +11927,6 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
             dc->is_jmp = DISAS_EXC;
             break;
         }
-#else
-        if (arm_dc_feature(dc, ARM_FEATURE_M)) {
-            /* Branches to the magic exception-return addresses should
-             * already have been caught via the arm_v7m_unassigned_access hook,
-             * and never get here.
-             */
-            assert(dc->pc < 0xfffffff0);
-        }
 #endif
 
         if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
@@ -11953,9 +12021,8 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
             ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
 
     } while (!dc->is_jmp && !tcg_op_buf_full() &&
-             !cs->singlestep_enabled &&
+             !is_singlestepping(dc) &&
              !singlestep &&
-             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);
 
@@ -11971,9 +12038,16 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
     /* At this stage dc->condjmp will only be set when the skipped
        instruction was a conditional branch or trap, and the PC has
        already been written. */
-    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
+    gen_set_condexec(dc);
+    if (dc->is_jmp == DISAS_BX_EXCRET) {
+        /* Exception return branches need some special case code at the
+         * end of the TB, which is complex enough that it has to
+         * handle the single-step vs not and the condition-failed
+         * insn codepath itself.
+         */
+        gen_bx_excret_final_code(dc);
+    } else if (unlikely(is_singlestepping(dc))) {
         /* Unconditional and "condition passed" instruction codepath. */
-        gen_set_condexec(dc);
         switch (dc->is_jmp) {
         case DISAS_SWI:
             gen_ss_advance(dc);
@@ -11993,24 +12067,8 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
             gen_set_pc_im(dc, dc->pc);
             /* fall through */
         default:
-            if (dc->ss_active) {
-                gen_step_complete_exception(dc);
-            } else {
-                /* FIXME: Single stepping a WFI insn will not halt
-                   the CPU. */
-                gen_exception_internal(EXCP_DEBUG);
-            }
-        }
-        if (dc->condjmp) {
-            /* "Condition failed" instruction codepath. */
-            gen_set_label(dc->condlabel);
-            gen_set_condexec(dc);
-            gen_set_pc_im(dc, dc->pc);
-            if (dc->ss_active) {
-                gen_step_complete_exception(dc);
-            } else {
-                gen_exception_internal(EXCP_DEBUG);
-            }
+            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
+            gen_singlestep_exception(dc);
         }
     } else {
         /* While branches must always occur at the end of an IT block,
@@ -12021,7 +12079,6 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
             - Hardware watchpoints.
            Hardware breakpoints have already been handled and skip this code.
          */
-        gen_set_condexec(dc);
         switch(dc->is_jmp) {
         case DISAS_NEXT:
             gen_goto_tb(dc, 1, dc->pc);
@@ -12061,11 +12118,17 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
             gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
             break;
         }
-        if (dc->condjmp) {
-            gen_set_label(dc->condlabel);
-            gen_set_condexec(dc);
+    }
+
+    if (dc->condjmp) {
+        /* "Condition failed" instruction codepath for the branch/trap insn */
+        gen_set_label(dc->condlabel);
+        gen_set_condexec(dc);
+        if (unlikely(is_singlestepping(dc))) {
+            gen_set_pc_im(dc, dc->pc);
+            gen_singlestep_exception(dc);
+        } else {
             gen_goto_tb(dc, 1, dc->pc);
-            dc->condjmp = 0;
         }
     }
 
diff --git a/target/arm/translate.h b/target/arm/translate.h
index abb0760158..629dab945e 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -31,6 +31,7 @@ typedef struct DisasContext {
     bool vfp_enabled; /* FP enabled via FPSCR.EN */
     int vec_len;
     int vec_stride;
+    bool v7m_handler_mode;
     /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
      * so that top level loop can generate correct syndrome information.
      */
@@ -134,6 +135,10 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
 #define DISAS_HVC 8
 #define DISAS_SMC 9
 #define DISAS_YIELD 10
+/* M profile branch which might be an exception return (and so needs
+ * custom end-of-TB code)
+ */
+#define DISAS_BX_EXCRET 11
 
 #ifdef TARGET_AARCH64
 void a64_translate_init(void);
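translate.h rounds the series off with the new is_jmp code and the per-TB v7m_handler_mode flag that the epilogue in gen_intermediate_code() now keys off: exception-return branches get their own finisher first, and everything else makes a single singlestep-versus-normal choice. A minimal standalone sketch of that control flow, using a simplified context struct and illustrative names rather than QEMU's DisasContext and DISAS_* values:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for DisasContext and the DISAS_* codes */
enum jmp_kind { JMP_NEXT, JMP_JUMP, JMP_BX_EXCRET };

struct ctx {
    enum jmp_kind is_jmp;
    bool singlestep_enabled;  /* gdbstub single step */
    bool ss_active;           /* architectural single step */
};

/* Mirrors is_singlestepping(): either flavour of single step counts */
static bool is_singlestepping(const struct ctx *s)
{
    return s->singlestep_enabled || s->ss_active;
}

/* Mirrors the reworked end-of-TB logic: exception-return branches get
 * their own finisher, everything else picks singlestep or normal exit.
 */
static const char *end_of_tb(const struct ctx *s)
{
    if (s->is_jmp == JMP_BX_EXCRET) {
        return "emit exception-return check";
    }
    if (is_singlestepping(s)) {
        return "emit singlestep exception";
    }
    return "emit goto_tb / exit_tb";
}

int main(void)
{
    struct ctx a = { JMP_BX_EXCRET, false, false };
    struct ctx b = { JMP_NEXT, false, true };
    struct ctx c = { JMP_JUMP, false, false };

    printf("%s\n%s\n%s\n", end_of_tb(&a), end_of_tb(&b), end_of_tb(&c));
    return 0;
}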