Diffstat (limited to 'target/arm/helper.c')
-rw-r--r-- | target/arm/helper.c | 548
1 file changed, 469 insertions, 79 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index bfce09643b..180ab75458 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4266,6 +4266,125 @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+/* Return the exception level to which SVE-disabled exceptions should
+ * be taken, or 0 if SVE is enabled.
+ */
+static int sve_exception_el(CPUARMState *env)
+{
+#ifndef CONFIG_USER_ONLY
+    unsigned current_el = arm_current_el(env);
+
+    /* The CPACR.ZEN controls traps to EL1:
+     * 0, 2 : trap EL0 and EL1 accesses
+     * 1    : trap only EL0 accesses
+     * 3    : trap no accesses
+     */
+    switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
+    default:
+        if (current_el <= 1) {
+            /* Trap to PL1, which might be EL1 or EL3 */
+            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+                return 3;
+            }
+            return 1;
+        }
+        break;
+    case 1:
+        if (current_el == 0) {
+            return 1;
+        }
+        break;
+    case 3:
+        break;
+    }
+
+    /* Similarly for CPACR.FPEN, after having checked ZEN.  */
+    switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
+    default:
+        if (current_el <= 1) {
+            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+                return 3;
+            }
+            return 1;
+        }
+        break;
+    case 1:
+        if (current_el == 0) {
+            return 1;
+        }
+        break;
+    case 3:
+        break;
+    }
+
+    /* CPTR_EL2.  Check both TZ and TFP.  */
+    if (current_el <= 2
+        && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
+        && !arm_is_secure_below_el3(env)) {
+        return 2;
+    }
+
+    /* CPTR_EL3.  Check both EZ and TFP.  */
+    if (!(env->cp15.cptr_el[3] & CPTR_EZ)
+        || (env->cp15.cptr_el[3] & CPTR_TFP)) {
+        return 3;
+    }
+#endif
+    return 0;
+}
+
+static CPAccessResult zcr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+                                 bool isread)
+{
+    switch (sve_exception_el(env)) {
+    case 3:
+        return CP_ACCESS_TRAP_EL3;
+    case 2:
+        return CP_ACCESS_TRAP_EL2;
+    case 1:
+        return CP_ACCESS_TRAP;
+    }
+    return CP_ACCESS_OK;
+}
+
+static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                      uint64_t value)
+{
+    /* Bits other than [3:0] are RAZ/WI.  */
+    raw_write(env, ri, value & 0xf);
+}
+
+static const ARMCPRegInfo zcr_el1_reginfo = {
+    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
+    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
+    .access = PL1_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT,
+    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
+    .writefn = zcr_write, .raw_writefn = raw_write
+};
+
+static const ARMCPRegInfo zcr_el2_reginfo = {
+    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
+    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
+    .access = PL2_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT,
+    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
+    .writefn = zcr_write, .raw_writefn = raw_write
+};
+
+static const ARMCPRegInfo zcr_no_el2_reginfo = {
+    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
+    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
+    .access = PL2_RW, .type = ARM_CP_64BIT,
+    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
+};
+
+static const ARMCPRegInfo zcr_el3_reginfo = {
+    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
+    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
+    .access = PL3_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT,
+    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
+    .writefn = zcr_write, .raw_writefn = raw_write
+};
+
 void hw_watchpoint_update(ARMCPU *cpu, int n)
 {
     CPUARMState *env = &cpu->env;
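The CPACR.ZEN/FPEN decode in the hunk above is easy to get backwards: field value 1 traps only EL0 accesses, 3 traps nothing, and the remaining encodings (0 and 2) trap both EL0 and EL1. A minimal standalone sketch of just that decode follows (not QEMU code; extract32() is redefined locally to mirror QEMU's helper of the same name):

#include <stdio.h>
#include <stdint.h>

/* Mirrors QEMU's extract32(): take 'len' bits starting at bit 'start'. */
static uint32_t extract32(uint32_t value, int start, int len)
{
    return (value >> start) & ((1u << len) - 1);
}

/* Does a CPACR_EL1 value trap SVE accesses from exception level 'el'? */
static int zen_traps_el(uint32_t cpacr, unsigned el)
{
    switch (extract32(cpacr, 16, 2)) {   /* CPACR_EL1.ZEN is bits [17:16] */
    case 1:
        return el == 0;                  /* trap only EL0 accesses */
    case 3:
        return 0;                        /* trap no accesses */
    default:
        return el <= 1;                  /* 0 and 2: trap EL0 and EL1 */
    }
}

int main(void)
{
    for (uint32_t zen = 0; zen < 4; zen++) {
        uint32_t cpacr = zen << 16;
        printf("ZEN=%u: EL0 traps=%d EL1 traps=%d\n",
               (unsigned)zen, zen_traps_el(cpacr, 0), zen_traps_el(cpacr, 1));
    }
    return 0;
}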
@@ -5332,6 +5451,18 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         }
         define_one_arm_cp_reg(cpu, &sctlr);
     }
+
+    if (arm_feature(env, ARM_FEATURE_SVE)) {
+        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
+        if (arm_feature(env, ARM_FEATURE_EL2)) {
+            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
+        } else {
+            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
+        }
+        if (arm_feature(env, ARM_FEATURE_EL3)) {
+            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
+        }
+    }
 }
 
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
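The registration hunk above keeps ZCR_EL2 architecturally visible even on CPUs without EL2 by substituting a RAZ/WI stub (zcr_no_el2_reginfo) for the real register. An illustrative standalone sketch of that register-or-stub pattern follows, with an invented Reg type standing in for QEMU's ARMCPRegInfo:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

typedef struct {
    const char *name;
    uint64_t (*read)(void);
    void (*write)(uint64_t);
} Reg;

static uint64_t zcr_el2_state;
static uint64_t read_real(void) { return zcr_el2_state; }
static void write_real(uint64_t v) { zcr_el2_state = v & 0xf; } /* LEN is [3:0] */

/* RAZ/WI stub: reads as zero, writes are ignored. */
static uint64_t read_zero(void) { return 0; }
static void write_ignore(uint64_t v) { (void)v; }

static Reg make_zcr_el2(bool have_el2)
{
    Reg r = { "ZCR_EL2", read_zero, write_ignore };
    if (have_el2) {
        r.read = read_real;
        r.write = write_real;
    }
    return r;
}

int main(void)
{
    Reg r = make_zcr_el2(false);    /* a CPU without EL2 */
    r.write(0x5);                   /* silently ignored */
    printf("%s reads back %" PRIu64 "\n", r.name, r.read());
    return 0;
}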
@@ -6161,12 +6292,127 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
     return target_el;
 }
 
-static void v7m_push(CPUARMState *env, uint32_t val)
+static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
+                            ARMMMUIdx mmu_idx, bool ignfault)
 {
-    CPUState *cs = CPU(arm_env_get_cpu(env));
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    ARMMMUFaultInfo fi;
+    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+    int exc;
+    bool exc_secure;
+
+    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, NULL)) {
+        /* MPU/SAU lookup failed */
+        if (fi.type == ARMFault_QEMU_SFault) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault with SFSR.AUVIOL during stacking\n");
+            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+            env->v7m.sfar = addr;
+            exc = ARMV7M_EXCP_SECURE;
+            exc_secure = false;
+        } else {
+            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
+            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
+            exc = ARMV7M_EXCP_MEM;
+            exc_secure = secure;
+        }
+        goto pend_fault;
+    }
+    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
+                         attrs, &txres);
+    if (txres != MEMTX_OK) {
+        /* BusFault trying to write the data */
+        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
+        exc = ARMV7M_EXCP_BUS;
+        exc_secure = false;
+        goto pend_fault;
+    }
+    return true;
+
+pend_fault:
+    /* By pending the exception at this point we are making
+     * the IMPDEF choice "overridden exceptions pended" (see the
+     * MergeExcInfo() pseudocode). The other choice would be to not
+     * pend them now and then make a choice about which to throw away
+     * later if we have two derived exceptions.
+     * The only case when we must not pend the exception but instead
+     * throw it away is if we are doing the push of the callee registers
+     * and we've already generated a derived exception. Even in this
+     * case we will still update the fault status registers.
+     */
+    if (!ignfault) {
+        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
+    }
+    return false;
+}
+
+static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
+                           ARMMMUIdx mmu_idx)
+{
+    CPUState *cs = CPU(cpu);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult txres;
+    target_ulong page_size;
+    hwaddr physaddr;
+    int prot;
+    ARMMMUFaultInfo fi;
+    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+    int exc;
+    bool exc_secure;
+    uint32_t value;
+
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+                      &attrs, &prot, &page_size, &fi, NULL)) {
+        /* MPU/SAU lookup failed */
+        if (fi.type == ARMFault_QEMU_SFault) {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...SecureFault with SFSR.AUVIOL during unstack\n");
+            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+            env->v7m.sfar = addr;
+            exc = ARMV7M_EXCP_SECURE;
+            exc_secure = false;
+        } else {
+            qemu_log_mask(CPU_LOG_INT,
+                          "...MemManageFault with CFSR.MUNSTKERR\n");
+            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
+            exc = ARMV7M_EXCP_MEM;
+            exc_secure = secure;
+        }
+        goto pend_fault;
+    }
 
-    env->regs[13] -= 4;
-    stl_phys(cs->as, env->regs[13], val);
+    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+                              attrs, &txres);
+    if (txres != MEMTX_OK) {
+        /* BusFault trying to read the data */
+        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
+        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
+        exc = ARMV7M_EXCP_BUS;
+        exc_secure = false;
+        goto pend_fault;
+    }
+
+    *dest = value;
+    return true;
+
+pend_fault:
+    /* By pending the exception at this point we are making
+     * the IMPDEF choice "overridden exceptions pended" (see the
+     * MergeExcInfo() pseudocode). The other choice would be to not
+     * pend them now and then make a choice about which to throw away
+     * later if we have two derived exceptions.
+     */
+    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
+    return false;
 }
 
 /* Return true if we're using the process stack pointer (not the MSP) */
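v7m_stack_write() and v7m_stack_read() above return true on success, which lets their callers chain the individual frame accesses with &&: the first failing access records its fault status (and possibly pends a derived exception), and short-circuit evaluation skips the remaining accesses. A standalone sketch of that calling contract (invented stack_write() and fault model, not the QEMU functions):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool fault_pended;

static bool stack_write(uint32_t addr, uint32_t value)
{
    if (addr < 0x1000) {        /* pretend low addresses fault */
        fault_pended = true;    /* stand-in for pending a derived exception */
        return false;
    }
    printf("stored 0x%x at 0x%x\n", (unsigned)value, (unsigned)addr);
    return true;
}

int main(void)
{
    uint32_t frameptr = 0x0ff8;     /* frame starts in the faulting region */
    bool stacked_ok =
        stack_write(frameptr, 0xfefa125b) &&   /* faults: chain stops here */
        stack_write(frameptr + 8, 0x1234);     /* never attempted */

    printf("stacked_ok=%d fault_pended=%d\n", stacked_ok, fault_pended);
    return 0;
}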
@@ -6395,65 +6641,126 @@ static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
     }
 }
 
-static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure)
+static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
+                                uint32_t *pvec)
 {
     CPUState *cs = CPU(cpu);
     CPUARMState *env = &cpu->env;
     MemTxResult result;
-    hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4;
-    uint32_t addr;
+    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
+    uint32_t vector_entry;
+    MemTxAttrs attrs = {};
+    ARMMMUIdx mmu_idx;
+    bool exc_secure;
+
+    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
+
+    /* We don't do a get_phys_addr() here because the rules for vector
+     * loads are special: they always use the default memory map, and
+     * the default memory map permits reads from all addresses.
+     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
+     * that we want this special case which would always say "yes",
+     * we just do the SAU lookup here followed by a direct physical load.
+     */
+    attrs.secure = targets_secure;
+    attrs.user = false;
 
-    addr = address_space_ldl(cs->as, vec,
-                             MEMTXATTRS_UNSPECIFIED, &result);
+    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+        V8M_SAttributes sattrs = {};
+
+        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+        if (sattrs.ns) {
+            attrs.secure = false;
+        } else if (!targets_secure) {
+            /* NS access to S memory */
+            goto load_fail;
+        }
+    }
+
+    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
+                                     attrs, &result);
     if (result != MEMTX_OK) {
-        /* Architecturally this should cause a HardFault setting HSFR.VECTTBL,
-         * which would then be immediately followed by our failing to load
-         * the entry vector for that HardFault, which is a Lockup case.
-         * Since we don't model Lockup, we just report this guest error
-         * via cpu_abort().
-         */
-        cpu_abort(cs, "Failed to read from %s exception vector table "
-                  "entry %08x\n", targets_secure ? "secure" : "nonsecure",
-                  (unsigned)vec);
+        goto load_fail;
     }
-    return addr;
+    *pvec = vector_entry;
+    return true;
+
+load_fail:
+    /* All vector table fetch fails are reported as HardFault, with
+     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
+     * technically the underlying exception is a MemManage or BusFault
+     * that is escalated to HardFault.) This is a terminal exception,
+     * so we will either take the HardFault immediately or else enter
+     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
+     */
+    exc_secure = targets_secure ||
+        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
+    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
+    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
+    return false;
 }
 
-static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain)
+static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                  bool ignore_faults)
 {
     /* For v8M, push the callee-saves register part of the stack frame.
      * Compare the v8M pseudocode PushCalleeStack().
      * In the tailchaining case this may not be the current stack.
      */
     CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
     uint32_t *frame_sp_p;
     uint32_t frameptr;
+    ARMMMUIdx mmu_idx;
+    bool stacked_ok;
 
     if (dotailchain) {
-        frame_sp_p = get_v7m_sp_ptr(env, true,
-                                    lr & R_V7M_EXCRET_MODE_MASK,
+        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
+        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
+            !mode;
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
+        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                     lr & R_V7M_EXCRET_SPSEL_MASK);
     } else {
+        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
         frame_sp_p = &env->regs[13];
     }
 
     frameptr = *frame_sp_p - 0x28;
 
-    stl_phys(cs->as, frameptr, 0xfefa125b);
-    stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
-    stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
-    stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
-    stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
-    stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
-    stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
-    stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
-    stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
-
+    /* Write as much of the stack frame as we can. A write failure may
+     * cause us to pend a derived exception.
+     */
+    stacked_ok =
+        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
+                        ignore_faults) &&
+        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
+                        ignore_faults);
+
+    /* Update SP regardless of whether any of the stack accesses failed.
+     * When we implement v8M stack limit checking then this attempt to
+     * update SP might also fail and result in a derived exception.
+     */
     *frame_sp_p = frameptr;
+
+    return !stacked_ok;
 }
 
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+                                bool ignore_stackfaults)
 {
     /* Do the "take the exception" parts of exception entry,
      * but not the pushing of state to the stack. This is
@@ -6462,8 +6769,10 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
     CPUARMState *env = &cpu->env;
     uint32_t addr;
     bool targets_secure;
+    int exc;
+    bool push_failed = false;
 
-    targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
+    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
 
     if (arm_feature(env, ARM_FEATURE_V8)) {
         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
@@ -6489,7 +6798,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
          */
         if (lr & R_V7M_EXCRET_DCRS_MASK &&
             !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
-            v7m_push_callee_stack(cpu, lr, dotailchain);
+            push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
+                                                ignore_stackfaults);
         }
         lr |= R_V7M_EXCRET_DCRS_MASK;
     }
@@ -6531,6 +6841,27 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
         }
     }
 
+    if (push_failed && !ignore_stackfaults) {
+        /* Derived exception on callee-saves register stacking:
+         * we might now want to take a different exception which
+         * targets a different security state, so try again from the top.
+         */
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
+        /* Vector load failed: derived exception */
+        v7m_exception_taken(cpu, lr, true, true);
+        return;
+    }
+
+    /* Now we've done everything that might cause a derived exception
+     * we can go ahead and activate whichever exception we're going to
+     * take (which might now be the derived exception).
+     */
+    armv7m_nvic_acknowledge_irq(env->nvic);
+
     /* Switch to target security state -- must do this before writing SPSEL */
     switch_v7m_security_state(env, targets_secure);
     write_v7m_control_spsel(env, 0);
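The hunk above reorders exception entry into a peek/commit pattern: armv7m_nvic_get_pending_irq_info() only peeks at the pending exception, everything that can pend a higher-priority derived exception runs next (callee-saves stacking, vector table fetch, each retried once with faults ignored), and armv7m_nvic_acknowledge_irq() finally activates whichever exception won. A toy standalone model of that two-phase flow, with all names and the priority rule invented for illustration:

#include <stdio.h>
#include <stdbool.h>

static int pending_exc = 3;          /* e.g. an external interrupt */
static bool derived_pended;

static void peek_pending(int *exc) { *exc = pending_exc; }
static void acknowledge(int exc) { printf("activated exception %d\n", exc); }

/* Returns true if the frame push faulted (and a derived exception
 * was pended), mimicking v7m_push_stack()'s return convention.
 */
static bool push_frame(bool ignore_faults)
{
    if (!ignore_faults && !derived_pended) {
        derived_pended = true;       /* stacking faulted: pend MemManage */
        pending_exc = 4;             /* assume it outranks the original */
        return true;
    }
    return false;
}

static void take_exception(bool ignore_stackfaults)
{
    int exc;
    peek_pending(&exc);
    if (push_frame(ignore_stackfaults) && !ignore_stackfaults) {
        /* Derived exception: retry from the top, ignoring new faults. */
        take_exception(true);
        return;
    }
    acknowledge(exc);                /* commit: activate the winner */
}

int main(void)
{
    take_exception(false);           /* prints "activated exception 4" */
    return 0;
}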
@@ -6538,34 +6869,55 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
     /* Clear IT bits */
     env->condexec_bits = 0;
     env->regs[14] = lr;
-    addr = arm_v7m_load_vector(cpu, targets_secure);
     env->regs[15] = addr & 0xfffffffe;
     env->thumb = addr & 1;
 }
 
-static void v7m_push_stack(ARMCPU *cpu)
+static bool v7m_push_stack(ARMCPU *cpu)
 {
     /* Do the "set up stack frame" part of exception entry,
      * similar to pseudocode PushStack().
+     * Return true if we generate a derived exception (and so
+     * should ignore further stack faults trying to process
+     * that derived exception.)
      */
+    bool stacked_ok;
     CPUARMState *env = &cpu->env;
     uint32_t xpsr = xpsr_read(env);
+    uint32_t frameptr = env->regs[13];
+    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
 
     /* Align stack pointer if the guest wants that */
-    if ((env->regs[13] & 4) &&
+    if ((frameptr & 4) &&
         (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
-        env->regs[13] -= 4;
+        frameptr -= 4;
         xpsr |= XPSR_SPREALIGN;
     }
-    /* Switch to the handler mode.  */
-    v7m_push(env, xpsr);
-    v7m_push(env, env->regs[15]);
-    v7m_push(env, env->regs[14]);
-    v7m_push(env, env->regs[12]);
-    v7m_push(env, env->regs[3]);
-    v7m_push(env, env->regs[2]);
-    v7m_push(env, env->regs[1]);
-    v7m_push(env, env->regs[0]);
+
+    frameptr -= 0x20;
+
+    /* Write as much of the stack frame as we can. If we fail a stack
+     * write this will result in a derived exception being pended
+     * (which may be taken in preference to the one we started with
+     * if it has higher priority).
+     */
+    stacked_ok =
+        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
+        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
+
+    /* Update SP regardless of whether any of the stack accesses failed.
+     * When we implement v8M stack limit checking then this attempt to
+     * update SP might also fail and result in a derived exception.
+     */
+    env->regs[13] = frameptr;
+
+    return !stacked_ok;
 }
 
 static void do_v7m_exception_exit(ARMCPU *cpu)
@@ -6711,7 +7063,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
     if (sfault) {
         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-        v7m_exception_taken(cpu, excret, true);
+        v7m_exception_taken(cpu, excret, true, false);
         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                       "stackframe: failed EXC_RETURN.ES validity check\n");
         return;
@@ -6723,7 +7075,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
          */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
-       v7m_exception_taken(cpu, excret, true);
+       v7m_exception_taken(cpu, excret, true, false);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        return;
@@ -6752,6 +7104,11 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
                                                   !return_to_handler,
                                                   return_to_sp_process);
         uint32_t frameptr = *frame_sp_p;
+        bool pop_ok = true;
+        ARMMMUIdx mmu_idx;
+
+        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
+                                                        !return_to_handler);
 
         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
             arm_feature(env, ARM_FEATURE_V8)) {
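Both the entry path earlier (XPSR_SPREALIGN) and the v8M alignment check in the next hunk revolve around the same invariant: the frame should sit on an 8-byte boundary, and xPSR bit 9 records whether the SP had to be aligned down so the adjustment can be undone on exception return. A standalone sketch of that round trip (bit position per the architecture; simplified, not QEMU code):

#include <stdio.h>
#include <stdint.h>

#define XPSR_SPREALIGN (1u << 9)   /* stacked xPSR bit 9: SP was realigned */

static void push_align(uint32_t *sp, uint32_t *xpsr)
{
    if (*sp & 4) {                 /* only 4-byte aligned: force to 8 */
        *sp -= 4;
        *xpsr |= XPSR_SPREALIGN;
    }
}

static void pop_align(uint32_t *sp, uint32_t xpsr)
{
    if (xpsr & XPSR_SPREALIGN) {   /* undo the adjustment on return */
        *sp += 4;
    }
}

int main(void)
{
    uint32_t sp = 0x2000fff4, xpsr = 0;
    push_align(&sp, &xpsr);
    printf("after push: sp=0x%x realigned=%d\n",
           (unsigned)sp, !!(xpsr & XPSR_SPREALIGN));
    pop_align(&sp, xpsr);
    printf("after pop:  sp=0x%x\n", (unsigned)sp);
    return 0;
}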
@@ -6771,36 +7128,45 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
             /* Take a SecureFault on the current stack */
             env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
-            v7m_exception_taken(cpu, excret, true);
+            v7m_exception_taken(cpu, excret, true, false);
             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                           "stackframe: failed exception return integrity "
                           "signature check\n");
             return;
         }
 
-        env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
-        env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
-        env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
-        env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
-        env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
-        env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
-        env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
-        env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);
+        pop_ok =
+            v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
+            v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
 
         frameptr += 0x28;
     }
 
-    /* Pop registers. TODO: make these accesses use the correct
-     * attributes and address space (S/NS, priv/unpriv) and handle
-     * memory transaction failures.
-     */
-    env->regs[0] = ldl_phys(cs->as, frameptr);
-    env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
-    env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
-    env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
-    env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
-    env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
-    env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
+    /* Pop registers */
+    pop_ok = pop_ok &&
+        v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
+        v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
+        v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
+        v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
+        v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
+        v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
+        v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
+        v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
+
+    if (!pop_ok) {
+        /* v7m_stack_read() pended a fault, so take it (as a tail
+         * chained exception on the same stack frame)
+         */
+        v7m_exception_taken(cpu, excret, true, false);
+        return;
+    }
 
     /* Returning from an exception with a PC with bit 0 set is defined
      * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
@@ -6819,8 +7185,6 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         }
     }
 
-    xpsr = ldl_phys(cs->as, frameptr + 0x1c);
-
     if (arm_feature(env, ARM_FEATURE_V8)) {
         /* For v8M we have to check whether the xPSR exception field
          * matches the EXCRET value for return to handler/thread
@@ -6836,7 +7200,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                     env->v7m.secure);
             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-            v7m_exception_taken(cpu, excret, true);
+            v7m_exception_taken(cpu, excret, true, false);
             qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                           "stackframe: failed exception return integrity "
                          "check\n");
@@ -6869,11 +7233,13 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
         /* Take an INVPC UsageFault by pushing the stack again;
          * we know we're v7M so this is never a Secure UsageFault.
          */
+        bool ignore_stackfaults;
+
         assert(!arm_feature(env, ARM_FEATURE_V8));
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
-        v7m_push_stack(cpu);
-        v7m_exception_taken(cpu, excret, false);
+        ignore_stackfaults = v7m_push_stack(cpu);
+        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                       "failed exception return integrity check\n");
         return;
@@ -7114,6 +7480,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
     uint32_t lr;
+    bool ignore_stackfaults;
 
     arm_log_exception(cs->exception_index);
 
@@ -7288,8 +7655,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
         lr |= R_V7M_EXCRET_MODE_MASK;
     }
 
-    v7m_push_stack(cpu);
-    v7m_exception_taken(cpu, lr, false);
+    ignore_stackfaults = v7m_push_stack(cpu);
+    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
     qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
 }
@@ -11692,14 +12059,37 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *pflags)
 {
     ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    int fp_el = fp_exception_el(env);
     uint32_t flags;
 
     if (is_a64(env)) {
+        int sve_el = sve_exception_el(env);
+        uint32_t zcr_len;
+
         *pc = env->pc;
         flags = ARM_TBFLAG_AARCH64_STATE_MASK;
         /* Get control bits for tagged addresses */
         flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
         flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
+        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;
+
+        /* If SVE is disabled, but FP is enabled,
+         * then the effective len is 0.
+         */
+        if (sve_el != 0 && fp_el == 0) {
+            zcr_len = 0;
+        } else {
+            int current_el = arm_current_el(env);
+
+            zcr_len = env->vfp.zcr_el[current_el <= 1 ? 1 : current_el];
+            zcr_len &= 0xf;
+            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
+            }
+            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
+                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
+            }
+        }
+        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
     } else {
         *pc = env->regs[15];
         flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
@@ -11742,7 +12132,7 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     if (arm_cpu_data_is_big_endian(env)) {
         flags |= ARM_TBFLAG_BE_DATA_MASK;
     }
-    flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
+    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;
 
     if (arm_v7m_is_handler_mode(env)) {
         flags |= ARM_TBFLAG_HANDLER_MASK;
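In the zcr_len computation at the end of the diff, the effective SVE vector length is the minimum of the 4-bit ZCR_ELx.LEN fields of the current and all higher implemented exception levels, and the resulting vector length is (LEN + 1) * 128 bits. A standalone sketch of that clamping (hypothetical helper, not the QEMU code):

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint32_t effective_zcr_len(uint32_t zcr_el1, uint32_t zcr_el2,
                                  uint32_t zcr_el3, int current_el,
                                  int have_el2, int have_el3)
{
    /* Below EL2 the EL1 setting is the starting point. */
    uint32_t len = (current_el <= 1 ? zcr_el1
                    : current_el == 2 ? zcr_el2 : zcr_el3) & 0xf;

    /* Higher exception levels can only constrain, never extend. */
    if (current_el < 2 && have_el2) {
        len = MIN(len, zcr_el2 & 0xf);
    }
    if (current_el < 3 && have_el3) {
        len = MIN(len, zcr_el3 & 0xf);
    }
    return len;
}

int main(void)
{
    /* EL1 asks for LEN=15 but EL3 caps it at 3: vectors are 512 bits. */
    uint32_t len = effective_zcr_len(15, 15, 3, 1, 1, 1);
    printf("effective LEN=%u -> vector length %u bits\n",
           (unsigned)len, (unsigned)((len + 1) * 128));
    return 0;
}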