Diffstat (limited to 'target/arm/helper.c')
 target/arm/helper.c | 245 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 179 insertions(+), 66 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index bcedb4a808..3f4211b572 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6002,22 +6002,165 @@ static void switch_v7m_sp(CPUARMState *env, bool new_spsel)
}
}
-static void do_v7m_exception_exit(CPUARMState *env)
+static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxResult result;
+ hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
+ uint32_t addr;
+
+ addr = address_space_ldl(cs->as, vec,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ /* Architecturally this should cause a HardFault setting HFSR.VECTTBL,
+ * which would then be immediately followed by our failing to load
+ * the entry vector for that HardFault, which is a Lockup case.
+ * Since we don't model Lockup, we just report this guest error
+ * via cpu_abort().
+ */
+ cpu_abort(cs, "Failed to read from exception vector table "
+ "entry %08x\n", (unsigned)vec);
+ }
+ return addr;
+}
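+
+/* As a worked example (illustrative numbers, not taken from this code):
+ * with env->v7m.vecbase == 0x08000000 and a pending HardFault (exception
+ * number 3), the load above fetches the handler address from
+ * 0x08000000 + 3 * 4 == 0x0800000c in the vector table.
+ */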
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
+{
+ /* Do the "take the exception" parts of exception entry,
+ * but not the pushing of state to the stack. This is
+ * similar to the pseudocode ExceptionTaken() function.
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t addr;
+
+ armv7m_nvic_acknowledge_irq(env->nvic);
+ switch_v7m_sp(env, 0);
+ /* Clear IT bits */
+ env->condexec_bits = 0;
+ env->regs[14] = lr;
+ addr = arm_v7m_load_vector(cpu);
+ env->regs[15] = addr & 0xfffffffe;
+ env->thumb = addr & 1;
+}
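+
+/* A minimal sketch of the split done above, assuming a vector table entry
+ * of 0x08000401 (a Thumb handler, as M profile requires):
+ *   regs[15] = 0x08000401 & 0xfffffffe;  => 0x08000400
+ *   thumb    = 0x08000401 & 1;           => 1
+ * An entry with bit 0 clear would set up an attempt to execute in ARM
+ * state, which M profile does not have and which will fault.
+ */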
+
+static void v7m_push_stack(ARMCPU *cpu)
+{
+ /* Do the "set up stack frame" part of exception entry,
+ * similar to pseudocode PushStack().
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t xpsr = xpsr_read(env);
+
+ /* Align stack pointer if the guest wants that */
+ if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
+ env->regs[13] -= 4;
+ xpsr |= 0x200;
+ }
+ /* Build the basic exception frame: xPSR, return address, LR, R12, R3-R0. */
+ v7m_push(env, xpsr);
+ v7m_push(env, env->regs[15]);
+ v7m_push(env, env->regs[14]);
+ v7m_push(env, env->regs[12]);
+ v7m_push(env, env->regs[3]);
+ v7m_push(env, env->regs[2]);
+ v7m_push(env, env->regs[1]);
+ v7m_push(env, env->regs[0]);
+}
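+
+/* The basic (no-FP) frame built above, seen from the final SP upwards:
+ *   SP+0x00 R0    SP+0x04 R1    SP+0x08 R2    SP+0x0c R3
+ *   SP+0x10 R12   SP+0x14 LR    SP+0x18 ReturnAddress   SP+0x1c xPSR
+ * Bit 9 of the pushed xPSR records whether the 4-byte aligner was
+ * inserted below the frame.
+ */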
+
+static void do_v7m_exception_exit(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
uint32_t type;
uint32_t xpsr;
-
+ bool ufault = false;
+ bool return_to_sp_process = false;
+ bool return_to_handler = false;
+ bool rettobase = false;
+
+ /* We can only get here from an EXCP_EXCEPTION_EXIT, and
+ * arm_v7m_do_unassigned_access() enforces the architectural rule
+ * that jumps to magic addresses don't have magic behaviour unless
+ * we're in Handler mode (compare pseudocode BXWritePC()).
+ */
+ assert(env->v7m.exception != 0);
+
+ /* In the spec pseudocode ExceptionReturn() is called directly
+ * from BXWritePC() and gets the full target PC value including
+ * bit zero. In QEMU's implementation we treat it as a normal
+ * jump-to-register (which is then caught later on), and so split
+ * the target value up between env->regs[15] and env->thumb in
+ * gen_bx(). Reconstitute it.
+ */
type = env->regs[15];
+ if (env->thumb) {
+ type |= 1;
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
+ " previous exception %d\n",
+ type, env->v7m.exception);
+
+ if (extract32(type, 5, 23) != extract32(-1, 5, 23)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
+ "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", type);
+ }
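+ /* extract32(type, 5, 23) is bits [27:5] of the EXC_RETURN value: a
+ * well-formed value such as 0xfffffffd has them all set, whereas a
+ * (hypothetical) 0x0000000d would trigger the warning above.
+ */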
+
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
/* Auto-clear FAULTMASK on return from other than NMI */
env->daif &= ~PSTATE_F;
}
- if (env->v7m.exception != 0) {
- armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
+
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
+ case -1:
+ /* attempt to exit an exception that isn't active */
+ ufault = true;
+ break;
+ case 0:
+ /* still an irq active now */
+ break;
+ case 1:
+ /* we returned to base exception level, no nesting.
+ * (In the pseudocode this is written using "NestedActivation != 1"
+ * where we have 'rettobase == false'.)
+ */
+ rettobase = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ switch (type & 0xf) {
+ case 1: /* Return to Handler */
+ return_to_handler = true;
+ break;
+ case 13: /* Return to Thread using Process stack */
+ return_to_sp_process = true;
+ /* fall through */
+ case 9: /* Return to Thread using Main stack */
+ if (!rettobase &&
+ !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ ufault = true;
+ }
+ break;
+ default:
+ ufault = true;
+ }
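+ /* For reference, the standard EXC_RETURN encodings decoded above are
+ * 0xfffffff1 (return to Handler, Main stack), 0xfffffff9 (Thread, Main
+ * stack) and 0xfffffffd (Thread, Process stack); only the low nibble is
+ * examined here.
+ */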
+
+ if (ufault) {
+ /* Bad exception return: instead of popping the exception
+ * stack, directly take a usage fault on the current stack.
+ */
+ env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
+ v7m_exception_taken(cpu, type | 0xf0000000);
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: failed exception return integrity check\n");
+ return;
}
/* Switch to the target stack. */
- switch_v7m_sp(env, (type & 4) != 0);
+ switch_v7m_sp(env, return_to_sp_process);
/* Pop registers. */
env->regs[0] = v7m_pop(env);
env->regs[1] = v7m_pop(env);
@@ -6041,11 +6184,24 @@ static void do_v7m_exception_exit(CPUARMState *env)
/* Undo stack alignment. */
if (xpsr & 0x200)
env->regs[13] |= 4;
- /* ??? The exception return type specifies Thread/Handler mode. However
- this is also implied by the xPSR value. Not sure what to do
- if there is a mismatch. */
- /* ??? Likewise for mismatches between the CONTROL register and the stack
- pointer. */
+
+ /* The restored xPSR exception field will be zero if we're
+ * resuming in Thread mode. If that doesn't match what the
+ * exception return type specified then this is a UsageFault.
+ */
+ if (return_to_handler == (env->v7m.exception == 0)) {
+ /* Take an INVPC UsageFault by pushing the stack again. */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
+ env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
+ v7m_push_stack(cpu);
+ v7m_exception_taken(cpu, type | 0xf0000000);
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
+ "failed exception return integrity check\n");
+ return;
+ }
+
+ /* Otherwise, we have a successful exception exit. */
+ qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static void arm_log_exception(int idx)
@@ -6063,37 +6219,11 @@ static void arm_log_exception(int idx)
}
}
-static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
-
-{
- CPUState *cs = CPU(cpu);
- CPUARMState *env = &cpu->env;
- MemTxResult result;
- hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
- uint32_t addr;
-
- addr = address_space_ldl(cs->as, vec,
- MEMTXATTRS_UNSPECIFIED, &result);
- if (result != MEMTX_OK) {
- /* Architecturally this should cause a HardFault setting HSFR.VECTTBL,
- * which would then be immediately followed by our failing to load
- * the entry vector for that HardFault, which is a Lockup case.
- * Since we don't model Lockup, we just report this guest error
- * via cpu_abort().
- */
- cpu_abort(cs, "Failed to read from exception vector table "
- "entry %08x\n", (unsigned)vec);
- }
- return addr;
-}
-
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- uint32_t xpsr = xpsr_read(env);
uint32_t lr;
- uint32_t addr;
arm_log_exception(cs->exception_index);
@@ -6106,28 +6236,30 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
/* For exceptions we just mark as pending on the NVIC, and let that
handle it. */
- /* TODO: Need to escalate if the current priority is higher than the
- one we're raising. */
switch (cs->exception_index) {
case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK;
- return;
+ break;
case EXCP_NOCP:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK;
- return;
+ break;
+ case EXCP_INVSTATE:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
+ env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK;
+ break;
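+ /* (EXCP_INVSTATE is the illegal-execution-state case, e.g. an attempt
+ * to execute with the EPSR T bit clear, i.e. in ARM state, which M
+ * profile does not support; assuming that trigger, it is reported via
+ * the INVSTATE bit of the CFSR set above.)
+ */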
case EXCP_SWI:
/* The PC already points to the next instruction. */
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
- return;
+ break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
/* TODO: if we implemented the MPU registers, this is where we
* should set the MMFAR, etc from exception.fsr and exception.vaddress.
*/
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
- return;
+ break;
case EXCP_BKPT:
if (semihosting_enabled()) {
int nr;
@@ -6142,39 +6274,20 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
}
}
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
- return;
+ break;
case EXCP_IRQ:
- env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
break;
case EXCP_EXCEPTION_EXIT:
- do_v7m_exception_exit(env);
+ do_v7m_exception_exit(cpu);
return;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
- /* Align stack pointer if the guest wants that */
- if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
- env->regs[13] -= 4;
- xpsr |= 0x200;
- }
- /* Switch to the handler mode. */
- v7m_push(env, xpsr);
- v7m_push(env, env->regs[15]);
- v7m_push(env, env->regs[14]);
- v7m_push(env, env->regs[12]);
- v7m_push(env, env->regs[3]);
- v7m_push(env, env->regs[2]);
- v7m_push(env, env->regs[1]);
- v7m_push(env, env->regs[0]);
- switch_v7m_sp(env, 0);
- /* Clear IT bits */
- env->condexec_bits = 0;
- env->regs[14] = lr;
- addr = arm_v7m_load_vector(cpu);
- env->regs[15] = addr & 0xfffffffe;
- env->thumb = addr & 1;
+ v7m_push_stack(cpu);
+ v7m_exception_taken(cpu, lr);
+ qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
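+
+/* The CPU_LOG_INT messages above ("Exception return: ...", "...successful
+ * exception return", "... as N") can be observed by running QEMU with
+ * "-d int".
+ */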
/* Function used to synchronize QEMU's AArch64 register set with AArch32