author    Peter Maydell <peter.maydell@linaro.org>  2019-04-29 18:05:56 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2019-04-29 18:05:56 +0100
commit    82b2865e0d0ea4c1001e9e7ed7920bcc0458f6de (patch)
tree      33bea7bef0327838d25f33e638a2528c4a3faf5e /target
parent    413a99a92c13ec408dcf2adaa87918dc81e890c8 (diff)
parent    437cc27ddfded3bbab6afd5ac1761e0e195edba7 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190429' into staging
target-arm queue:
 * remove "bag of random stuff" hw/devices.h header
 * implement FPU for Cortex-M and enable it for Cortex-M4 and -M33
 * hw/dma: Compile the bcm2835_dma device as common object
 * configure: Remove --source-path option
 * hw/ssi/xilinx_spips: Avoid variable length array
 * hw/arm/smmuv3: Remove SMMUNotifierNode

# gpg: Signature made Mon 29 Apr 2019 17:58:57 BST
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190429: (42 commits)
  hw/devices: Move SMSC 91C111 declaration into a new header
  hw/net/lan9118: Export TYPE_LAN9118 and use it instead of hardcoded string
  hw/net/ne2000-isa: Add guards to the header
  hw/devices: Move LAN9118 declarations into a new header
  hw/devices: Move TI touchscreen declarations into a new header
  hw/devices: Move Gamepad declarations into a new header
  hw/devices: Move CBus declarations into a new header
  hw/devices: Move Blizzard declarations into a new header
  hw/devices: Move TC6393XB declarations into a new header
  hw/display/tc6393xb: Remove unused functions
  hw/arm/nseries: Use TYPE_TMP105 instead of hardcoded string
  hw/arm/aspeed: Use TYPE_TMP105/TYPE_PCA9552 instead of hardcoded string
  hw/dma: Compile the bcm2835_dma device as common object
  target/arm: Enable FPU for Cortex-M4 and Cortex-M33
  target/arm: Implement VLLDM for v7M CPUs with an FPU
  target/arm: Implement VLSTM for v7M CPUs with an FPU
  target/arm: Implement M-profile lazy FP state preservation
  target/arm: Add lazy-FP-stacking support to v7m_stack_write()
  target/arm: New function armv7m_nvic_set_pending_lazyfp()
  target/arm: New helper function arm_v7m_mmu_idx_all()
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu.c         |  20
-rw-r--r--  target/arm/cpu.h         |  95
-rw-r--r--  target/arm/helper.c      | 873
-rw-r--r--  target/arm/helper.h      |   5
-rw-r--r--  target/arm/machine.c     |  16
-rw-r--r--  target/arm/translate.c   | 150
-rw-r--r--  target/arm/translate.h   |   3
-rw-r--r--  target/arm/vfp_helper.c  |   8
8 files changed, 1089 insertions, 81 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index bb9fdc6304..a181fa8dc1 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -281,6 +281,11 @@ static void arm_cpu_reset(CPUState *s)
env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
}
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
+ env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
+ R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
+ }
/* Unlike A/R profile, M profile defines the reset LR value */
env->regs[14] = 0xffffffff;
@@ -1029,6 +1034,13 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
set_feature(env, ARM_FEATURE_THUMB_DSP);
}
+ /*
+ * We rely on no XScale CPU having VFP so we can use the same bits in the
+ * TB flags field for VECSTRIDE and XSCALE_CPAR.
+ */
+ assert(!(arm_feature(env, ARM_FEATURE_VFP) &&
+ arm_feature(env, ARM_FEATURE_XSCALE)));
+
if (arm_feature(env, ARM_FEATURE_V7) &&
!arm_feature(env, ARM_FEATURE_M) &&
!arm_feature(env, ARM_FEATURE_PMSA)) {
@@ -1481,8 +1493,12 @@ static void cortex_m4_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_M);
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
cpu->midr = 0x410fc240; /* r0p0 */
cpu->pmsav7_dregion = 8;
+ cpu->isar.mvfr0 = 0x10110021;
+ cpu->isar.mvfr1 = 0x11000011;
+ cpu->isar.mvfr2 = 0x00000000;
cpu->id_pfr0 = 0x00000030;
cpu->id_pfr1 = 0x00000200;
cpu->id_dfr0 = 0x00100000;
@@ -1509,9 +1525,13 @@ static void cortex_m33_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
cpu->midr = 0x410fd213; /* r0p3 */
cpu->pmsav7_dregion = 16;
cpu->sau_sregion = 8;
+ cpu->isar.mvfr0 = 0x10110021;
+ cpu->isar.mvfr1 = 0x11000011;
+ cpu->isar.mvfr2 = 0x00000040;
cpu->id_pfr0 = 0x00000030;
cpu->id_pfr1 = 0x00000210;
cpu->id_dfr0 = 0x00200000;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f7f2f5a99c..22bc6e00ab 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -57,6 +57,9 @@
#define EXCP_NOCP 17 /* v7M NOCP UsageFault */
#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
+#define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */
+#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
+#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -533,6 +536,11 @@ typedef struct CPUARMState {
uint32_t scr[M_REG_NUM_BANKS];
uint32_t msplim[M_REG_NUM_BANKS];
uint32_t psplim[M_REG_NUM_BANKS];
+ uint32_t fpcar[M_REG_NUM_BANKS];
+ uint32_t fpccr[M_REG_NUM_BANKS];
+ uint32_t fpdscr[M_REG_NUM_BANKS];
+ uint32_t cpacr[M_REG_NUM_BANKS];
+ uint32_t nsacr;
} v7m;
/* Information associated with an exception about to be taken:
@@ -1576,6 +1584,35 @@ FIELD(V7M_CSSELR, LEVEL, 1, 3)
*/
FIELD(V7M_CSSELR, INDEX, 0, 4)
+/* v7M FPCCR bits */
+FIELD(V7M_FPCCR, LSPACT, 0, 1)
+FIELD(V7M_FPCCR, USER, 1, 1)
+FIELD(V7M_FPCCR, S, 2, 1)
+FIELD(V7M_FPCCR, THREAD, 3, 1)
+FIELD(V7M_FPCCR, HFRDY, 4, 1)
+FIELD(V7M_FPCCR, MMRDY, 5, 1)
+FIELD(V7M_FPCCR, BFRDY, 6, 1)
+FIELD(V7M_FPCCR, SFRDY, 7, 1)
+FIELD(V7M_FPCCR, MONRDY, 8, 1)
+FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1)
+FIELD(V7M_FPCCR, UFRDY, 10, 1)
+FIELD(V7M_FPCCR, RES0, 11, 15)
+FIELD(V7M_FPCCR, TS, 26, 1)
+FIELD(V7M_FPCCR, CLRONRETS, 27, 1)
+FIELD(V7M_FPCCR, CLRONRET, 28, 1)
+FIELD(V7M_FPCCR, LSPENS, 29, 1)
+FIELD(V7M_FPCCR, LSPEN, 30, 1)
+FIELD(V7M_FPCCR, ASPEN, 31, 1)
+/* These bits are banked. Others are non-banked and live in the M_REG_S bank */
+#define R_V7M_FPCCR_BANKED_MASK \
+ (R_V7M_FPCCR_LSPACT_MASK | \
+ R_V7M_FPCCR_USER_MASK | \
+ R_V7M_FPCCR_THREAD_MASK | \
+ R_V7M_FPCCR_MMRDY_MASK | \
+ R_V7M_FPCCR_SPLIMVIOL_MASK | \
+ R_V7M_FPCCR_UFRDY_MASK | \
+ R_V7M_FPCCR_ASPEN_MASK)
+
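Note: the FPCCR layout above is declared with QEMU's registerfields.h FIELD() macro, which generates R_<REG>_<FIELD>_SHIFT/_LENGTH/_MASK constants; FIELD_EX32() and FIELD_DP32(), used heavily later in this patch, are the matching extract/deposit helpers. A minimal standalone sketch of the equivalent shift-and-mask arithmetic (hand-written stand-ins, not the real macros):

    #include <stdint.h>
    #include <stdio.h>

    /* What FIELD(V7M_FPCCR, LSPACT, 0, 1) and FIELD(V7M_FPCCR, S, 2, 1)
     * effectively provide: */
    #define R_V7M_FPCCR_LSPACT_SHIFT 0
    #define R_V7M_FPCCR_LSPACT_MASK  (1u << 0)
    #define R_V7M_FPCCR_S_SHIFT      2
    #define R_V7M_FPCCR_S_MASK       (1u << 2)

    /* FIELD_EX32 equivalent: extract the S bit */
    static uint32_t fpccr_get_s(uint32_t fpccr)
    {
        return (fpccr & R_V7M_FPCCR_S_MASK) >> R_V7M_FPCCR_S_SHIFT;
    }

    /* FIELD_DP32 equivalent: deposit a new LSPACT value */
    static uint32_t fpccr_set_lspact(uint32_t fpccr, uint32_t v)
    {
        return (fpccr & ~R_V7M_FPCCR_LSPACT_MASK) |
               ((v << R_V7M_FPCCR_LSPACT_SHIFT) & R_V7M_FPCCR_LSPACT_MASK);
    }

    int main(void)
    {
        uint32_t fpccr = fpccr_set_lspact(0, 1);
        printf("FPCCR=0x%08x S=%u\n", (unsigned)fpccr,
               (unsigned)fpccr_get_s(fpccr));
        return 0;
    }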
/*
* System register ID fields.
*/
@@ -1975,6 +2012,18 @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
*/
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
/**
+ * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending
+ * @opaque: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for exceptions
+ * generated in the course of lazy stacking of FP registers.
+ */
+void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure);
+/**
* armv7m_nvic_get_pending_irq_info: return highest priority pending
* exception, and whether it targets Secure state
* @opaque: the NVIC
@@ -2011,6 +2060,20 @@ void armv7m_nvic_acknowledge_irq(void *opaque);
*/
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure);
/**
+ * armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
+ * @opaque: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Return whether an exception is "ready", i.e. whether the exception is
+ * enabled and is configured at a priority which would allow it to
+ * interrupt the current execution priority. This controls whether the
+ * RDY bit for it in the FPCCR is set.
+ */
+bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure);
+/**
* armv7m_nvic_raw_execution_priority: return the raw execution priority
* @opaque: the NVIC
*
@@ -2863,6 +2926,13 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
}
}
+/*
+ * Return the MMU index for a v7M CPU with all relevant information
+ * manually specified.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+ bool secstate, bool priv, bool negpri);
+
/* Return the MMU index for a v7M CPU in the specified security and
* privilege state.
*/
@@ -3090,18 +3160,27 @@ FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
FIELD(TBFLAG_A32, THUMB, 0, 1)
FIELD(TBFLAG_A32, VECLEN, 1, 3)
FIELD(TBFLAG_A32, VECSTRIDE, 4, 2)
-FIELD(TBFLAG_A32, VFPEN, 7, 1)
-FIELD(TBFLAG_A32, CONDEXEC, 8, 8)
-FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
-/* We store the bottom two bits of the CPAR as TB flags and handle
- * checks on the other bits at runtime
+/*
+ * We store the bottom two bits of the CPAR as TB flags and handle
+ * checks on the other bits at runtime. This shares the same bits as
+ * VECSTRIDE, which is OK as no XScale CPU has VFP.
*/
-FIELD(TBFLAG_A32, XSCALE_CPAR, 17, 2)
-/* Indicates whether cp register reads and writes by guest code should access
+FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
+/*
+ * Indicates whether cp register reads and writes by guest code should access
* the secure or nonsecure bank of banked registers; note that this is not
* the same thing as the current security state of the processor!
*/
-FIELD(TBFLAG_A32, NS, 19, 1)
+FIELD(TBFLAG_A32, NS, 6, 1)
+FIELD(TBFLAG_A32, VFPEN, 7, 1)
+FIELD(TBFLAG_A32, CONDEXEC, 8, 8)
+FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
+/* For M profile only, set if FPCCR.LSPACT is set */
+FIELD(TBFLAG_A32, LSPACT, 18, 1)
+/* For M profile only, set if we must create a new FP context */
+FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1)
+/* For M profile only, set if FPCCR.S does not match current security state */
+FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1)
/* For M profile only, Handler (ie not Thread) mode */
FIELD(TBFLAG_A32, HANDLER, 21, 1)
/* For M profile only, whether we should generate stack-limit checks */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 57ef75b3fc..81a92ab491 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7378,6 +7378,24 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
g_assert_not_reached();
}
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
/* The TT instructions can be used by unprivileged code, but in
@@ -7556,8 +7574,37 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
return target_el;
}
+/*
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
+{
+ switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+ case 0:
+ case 2: /* UNPREDICTABLE: we treat like 0 */
+ return false;
+ case 1:
+ return is_priv;
+ case 3:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
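Note: v7m_cpacr_pass() above decodes the architectural CPACR access-control field for CP10 (bits [21:20]); encoding 2 is UNPREDICTABLE and this patch treats it as "no access". A standalone sketch of the same decision table (cpacr_cp10_pass is a hypothetical name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool cpacr_cp10_pass(uint32_t cpacr, bool is_priv)
    {
        switch ((cpacr >> 20) & 3) {
        case 0:             /* no access */
        case 2:             /* UNPREDICTABLE: treated as no access */
            return false;
        case 1:             /* privileged access only */
            return is_priv;
        default:            /* 3: full access */
            return true;
        }
    }

    int main(void)
    {
        for (unsigned enc = 0; enc < 4; enc++) {
            printf("enc %u: unpriv=%d priv=%d\n", enc,
                   cpacr_cp10_pass(enc << 20, false),
                   cpacr_cp10_pass(enc << 20, true));
        }
        return 0;
    }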
+/*
+ * What kind of stack write are we doing? This affects how exceptions
+ * generated during the stacking are treated.
+ */
+typedef enum StackingMode {
+ STACK_NORMAL,
+ STACK_IGNFAULTS,
+ STACK_LAZYFP,
+} StackingMode;
+
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
- ARMMMUIdx mmu_idx, bool ignfault)
+ ARMMMUIdx mmu_idx, StackingMode mode)
{
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
@@ -7575,15 +7622,31 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
&attrs, &prot, &page_size, &fi, NULL)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
- qemu_log_mask(CPU_LOG_INT,
- "...SecureFault with SFSR.AUVIOL during stacking\n");
- env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+ if (mode == STACK_LAZYFP) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault with SFSR.LSPERR "
+ "during lazy stacking\n");
+ env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault with SFSR.AUVIOL "
+ "during stacking\n");
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+ }
+ env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
env->v7m.sfar = addr;
exc = ARMV7M_EXCP_SECURE;
exc_secure = false;
} else {
- qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
- env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
+ if (mode == STACK_LAZYFP) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault with CFSR.MLSPERR\n");
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault with CFSR.MSTKERR\n");
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
+ }
exc = ARMV7M_EXCP_MEM;
exc_secure = secure;
}
@@ -7593,8 +7656,13 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
attrs, &txres);
if (txres != MEMTX_OK) {
/* BusFault trying to write the data */
- qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
- env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
+ if (mode == STACK_LAZYFP) {
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
+ }
exc = ARMV7M_EXCP_BUS;
exc_secure = false;
goto pend_fault;
@@ -7609,11 +7677,19 @@ pend_fault:
* later if we have two derived exceptions.
* The only case when we must not pend the exception but instead
* throw it away is if we are doing the push of the callee registers
- * and we've already generated a derived exception. Even in this
- * case we will still update the fault status registers.
+ * and we've already generated a derived exception (this is indicated
+ * by the caller passing STACK_IGNFAULTS). Even in this case we will
+ * still update the fault status registers.
*/
- if (!ignfault) {
+ switch (mode) {
+ case STACK_NORMAL:
armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
+ break;
+ case STACK_LAZYFP:
+ armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
+ break;
+ case STACK_IGNFAULTS:
+ break;
}
return false;
}
@@ -7679,6 +7755,97 @@ pend_fault:
return false;
}
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+ /*
+ * Preserve FP state (because LSPACT was set and we are about
+ * to execute an FP instruction). This corresponds to the
+ * PreserveFPState() pseudocode.
+ * We may throw an exception if the stacking fails.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
+ bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
+ bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
+ uint32_t fpcar = env->v7m.fpcar[is_secure];
+ bool stacked_ok = true;
+ bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+ bool take_exception;
+
+ /* Take the iothread lock as we are going to touch the NVIC */
+ qemu_mutex_lock_iothread();
+
+ /* Check the background context had access to the FPU */
+ if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
+ armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
+ env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
+ stacked_ok = false;
+ } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
+ armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ stacked_ok = false;
+ }
+
+ if (!splimviol && stacked_ok) {
+ /* We only stack if the stack limit wasn't violated */
+ int i;
+ ARMMMUIdx mmu_idx;
+
+ mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+ uint32_t faddr = fpcar + 4 * i;
+ uint32_t slo = extract64(dn, 0, 32);
+ uint32_t shi = extract64(dn, 32, 32);
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR */
+ }
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
+ v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
+ }
+
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, fpcar + 0x40,
+ vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
+ }
+
+ /*
+ * We definitely pended an exception, but it's possible that it
+ * might not be able to be taken now. If its priority permits us
+ * to take it now, then we must not update the LSPACT or FP regs,
+ * but instead jump out to take the exception immediately.
+ * If it's just pending and won't be taken until the current
+ * handler exits, then we do update LSPACT and the FP regs.
+ */
+ take_exception = !stacked_ok &&
+ armv7m_nvic_can_take_pending_exception(env->nvic);
+
+ qemu_mutex_unlock_iothread();
+
+ if (take_exception) {
+ raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
+ }
+
+ env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+
+ if (ts) {
+ /* Clear s0 to s31 and the FPSCR */
+ int i;
+
+ for (i = 0; i < 32; i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ }
+ /*
+ * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
+ * unchanged.
+ */
+}
+
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
* This may change the current stack pointer between Main and Process
* stack pointers if it is done for the CONTROL register for the current
@@ -7800,6 +7967,9 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
/* translate.c should have made BXNS UNDEF unless we're secure */
assert(env->v7m.secure);
+ if (!(dest & 1)) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ }
switch_v7m_security_state(env, dest & 1);
env->thumb = 1;
env->regs[15] = dest & ~1;
@@ -7857,6 +8027,7 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
*/
write_v7m_exception(env, 1);
}
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
switch_v7m_security_state(env, 0);
env->thumb = 1;
env->regs[15] = dest;
@@ -7956,6 +8127,21 @@ load_fail:
return false;
}
+static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
+{
+ /*
+ * Return the integrity signature value for the callee-saves
+ * stack frame section. @lr is the exception return payload/LR value
+ * whose FType bit forms bit 0 of the signature if FP is present.
+ */
+ uint32_t sig = 0xfefa125a;
+
+ if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
+ sig |= 1;
+ }
+ return sig;
+}
+
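Note: with the constants in v7m_integrity_sig() above, a frame without FP space keeps the old fixed signature 0xfefa125b (bit 0 set), while a frame with FP space gets 0xfefa125a; FType is bit 4 of the EXC_RETURN payload. A standalone check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define EXCRET_FTYPE_MASK (1u << 4)

    static uint32_t integrity_sig(int have_fpu, uint32_t lr)
    {
        uint32_t sig = 0xfefa125a;

        if (!have_fpu || (lr & EXCRET_FTYPE_MASK)) {
            sig |= 1;       /* standard frame: the pre-FPU value 0xfefa125b */
        }
        return sig;
    }

    int main(void)
    {
        printf("no FPU:       0x%08x\n", (unsigned)integrity_sig(0, 0));
        printf("FPU, FType=1: 0x%08x\n",
               (unsigned)integrity_sig(1, EXCRET_FTYPE_MASK));
        printf("FPU, FType=0: 0x%08x\n", (unsigned)integrity_sig(1, 0));
        return 0;
    }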
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool ignore_faults)
{
@@ -7970,6 +8156,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool stacked_ok;
uint32_t limit;
bool want_psp;
+ uint32_t sig;
+ StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
if (dotailchain) {
bool mode = lr & R_V7M_EXCRET_MODE_MASK;
@@ -8011,24 +8199,17 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
/* Write as much of the stack frame as we can. A write failure may
* cause us to pend a derived exception.
*/
+ sig = v7m_integrity_sig(env, lr);
stacked_ok =
- v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
- ignore_faults) &&
- v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
- ignore_faults);
+ v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
/* Update SP regardless of whether any of the stack accesses failed. */
*frame_sp_p = frameptr;
@@ -8053,6 +8234,14 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
targets_secure ? "secure" : "nonsecure", exc);
+ if (dotailchain) {
+ /* Sanitize LR FType and PREFIX bits */
+ if (!arm_feature(env, ARM_FEATURE_VFP)) {
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
+ }
+ lr = deposit32(lr, 24, 8, 0xff);
+ }
+
if (arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
(lr & R_V7M_EXCRET_S_MASK)) {
@@ -8148,6 +8337,9 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
switch_v7m_security_state(env, targets_secure);
write_v7m_control_spsel(env, 0);
arm_clear_exclusive(env);
+ /* Clear SFPA and FPCA (has no effect if no FPU) */
+ env->v7m.control[M_REG_S] &=
+ ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
/* Clear IT bits */
env->condexec_bits = 0;
env->regs[14] = lr;
@@ -8155,6 +8347,187 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
env->thumb = addr & 1;
}
+static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
+ bool apply_splim)
+{
+ /*
+ * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
+ * that we will need later in order to do lazy FP reg stacking.
+ */
+ bool is_secure = env->v7m.secure;
+ void *nvic = env->nvic;
+ /*
+ * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
+ * are banked and we want to update the bit in the bank for the
+ * current security state; and in one case we want to specifically
+ * update the NS banked version of a bit even if we are secure.
+ */
+ uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
+ uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
+ uint32_t *fpccr = &env->v7m.fpccr[is_secure];
+ bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
+
+ env->v7m.fpcar[is_secure] = frameptr & ~0x7;
+
+ if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
+ bool splimviol;
+ uint32_t splim = v7m_sp_limit(env);
+ bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
+ (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
+
+ splimviol = !ign && frameptr < splim;
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
+ }
+
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
+
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
+
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
+
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
+ !arm_v7m_is_handler_mode(env));
+
+ hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
+
+ bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
+
+ mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
+
+ ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
+ *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
+
+ monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
+
+ sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
+ }
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+ /* fptr is the value of Rn, the frame pointer we store the FP regs to */
+ bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
+
+ assert(env->v7m.secure);
+
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+ return;
+ }
+
+ /* Check access to the coprocessor is permitted */
+ if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+ raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+ }
+
+ if (lspact) {
+ /* LSPACT should not be active when there is active FP state */
+ raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
+ }
+
+ if (fptr & 7) {
+ raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+ }
+
+ /*
+ * Note that we do not use v7m_stack_write() here, because the
+ * accesses should not set the FSR bits for stacking errors if they
+ * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
+ * or AccType_LAZYFP). Faults in cpu_stl_data() will throw exceptions
+ * and longjmp out.
+ */
+ if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+ bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+ int i;
+
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+ uint32_t faddr = fptr + 4 * i;
+ uint32_t slo = extract64(dn, 0, 32);
+ uint32_t shi = extract64(dn, 32, 32);
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR */
+ }
+ cpu_stl_data(env, faddr, slo);
+ cpu_stl_data(env, faddr + 4, shi);
+ }
+ cpu_stl_data(env, fptr + 0x40, vfp_get_fpscr(env));
+
+ /*
+ * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
+ * leave them unchanged, matching our choice in v7m_preserve_fp_state.
+ */
+ if (ts) {
+ for (i = 0; i < 32; i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ }
+ } else {
+ v7m_update_fpccr(env, fptr, false);
+ }
+
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+ /* fptr is the value of Rn, the frame pointer we load the FP regs from */
+ assert(env->v7m.secure);
+
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+ return;
+ }
+
+ /* Check access to the coprocessor is permitted */
+ if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+ raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+ }
+
+ if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+ /* State in FP is still valid */
+ env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
+ } else {
+ bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+ int i;
+ uint32_t fpscr;
+
+ if (fptr & 7) {
+ raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+ }
+
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
+ uint32_t slo, shi;
+ uint64_t dn;
+ uint32_t faddr = fptr + 4 * i;
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR */
+ }
+
+ slo = cpu_ldl_data(env, faddr);
+ shi = cpu_ldl_data(env, faddr + 4);
+
+ dn = (uint64_t) shi << 32 | slo;
+ *aa32_vfp_dreg(env, i / 2) = dn;
+ }
+ fpscr = cpu_ldl_data(env, fptr + 0x40);
+ vfp_set_fpscr(env, fpscr);
+ }
+
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+}
+
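Note: the register loops in v7m_preserve_fp_state(), v7m_vlstm() and v7m_vlldm() above all share the same slot arithmetic: the pair s[i]/s[i+1] lives at offset 4*i, with 8 bytes skipped after s15 (FPSCR at +0x40 plus a reserved word), so s16..s31 land at +0x48..+0x84. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fp_slot_offset(int i)   /* i = 0, 2, ..., 30 */
    {
        uint32_t off = 4 * i;

        if (i >= 16) {
            off += 8;                       /* skip FPSCR + reserved word */
        }
        return off;
    }

    int main(void)
    {
        printf("s0  at +0x%02x\n", (unsigned)fp_slot_offset(0));   /* 0x00 */
        printf("s14 at +0x%02x\n", (unsigned)fp_slot_offset(14));  /* 0x38 */
        printf("s16 at +0x%02x\n", (unsigned)fp_slot_offset(16));  /* 0x48 */
        printf("s30 at +0x%02x\n", (unsigned)fp_slot_offset(30));  /* 0x80 */
        return 0;
    }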
static bool v7m_push_stack(ARMCPU *cpu)
{
/* Do the "set up stack frame" part of exception entry,
@@ -8163,11 +8536,25 @@ static bool v7m_push_stack(ARMCPU *cpu)
* should ignore further stack faults trying to process
* that derived exception.)
*/
- bool stacked_ok;
+ bool stacked_ok = true, limitviol = false;
CPUARMState *env = &cpu->env;
uint32_t xpsr = xpsr_read(env);
uint32_t frameptr = env->regs[13];
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ uint32_t framesize;
+ bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
+
+ if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
+ (env->v7m.secure || nsacr_cp10)) {
+ if (env->v7m.secure &&
+ env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
+ framesize = 0xa8;
+ } else {
+ framesize = 0x68;
+ }
+ } else {
+ framesize = 0x20;
+ }
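Note: the three framesize values above are the standard frame (r0-r3, r12, lr, pc, xPSR = 8 words = 0x20), the FP frame (plus s0-s15, FPSCR and a reserved word = 0x68), and the extended secure frame (plus s16-s31 = 0xa8); the exception-return path later in this patch consumes the same sizes (frameptr += 0x20, then += 0x48 and += 0x40). A standalone sketch of the selection (hypothetical helper, folding the patch's conditions into booleans):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t v7m_frame_size(bool fpca, bool sec_or_nsacr_cp10,
                                   bool secure_and_ts)
    {
        if (fpca && sec_or_nsacr_cp10) {
            return secure_and_ts ? 0xa8 : 0x68;
        }
        return 0x20;
    }

    int main(void)
    {
        printf("standard: 0x%x\n", (unsigned)v7m_frame_size(false, false, false));
        printf("FP:       0x%x\n", (unsigned)v7m_frame_size(true, true, false));
        printf("FP+TS:    0x%x\n", (unsigned)v7m_frame_size(true, true, true));
        return 0;
    }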
/* Align stack pointer if the guest wants that */
if ((frameptr & 4) &&
@@ -8176,7 +8563,13 @@ static bool v7m_push_stack(ARMCPU *cpu)
xpsr |= XPSR_SPREALIGN;
}
- frameptr -= 0x20;
+ xpsr &= ~XPSR_SFPA;
+ if (env->v7m.secure &&
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+ xpsr |= XPSR_SFPA;
+ }
+
+ frameptr -= framesize;
if (arm_feature(env, ARM_FEATURE_V8)) {
uint32_t limit = v7m_sp_limit(env);
@@ -8194,7 +8587,14 @@ static bool v7m_push_stack(ARMCPU *cpu)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
env->v7m.secure);
env->regs[13] = limit;
- return true;
+ /*
+ * We won't try to perform any further memory accesses but
+ * we must continue through the following code to check for
+ * permission faults during FPU state preservation, and we
+ * must update FPCCR if lazy stacking is enabled.
+ */
+ limitviol = true;
+ stacked_ok = false;
}
}
@@ -8203,18 +8603,99 @@ static bool v7m_push_stack(ARMCPU *cpu)
* (which may be taken in preference to the one we started with
* if it has higher priority).
*/
- stacked_ok =
- v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
- v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 4, env->regs[1],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 8, env->regs[2],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 12, env->regs[3],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 16, env->regs[12],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 20, env->regs[14],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 24, env->regs[15],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
+
+ if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
+ /* FPU is active, try to save its registers */
+ bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
+
+ if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault because LSPACT and FPCA both set\n");
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ } else if (!env->v7m.secure && !nsacr_cp10) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...Secure UsageFault with CFSR.NOCP because "
+ "NSACR.CP10 prevents stacking FP regs\n");
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ } else {
+ if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+ /* Lazy stacking disabled, save registers now */
+ int i;
+ bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
+ arm_current_el(env) != 0);
- /* Update SP regardless of whether any of the stack accesses failed. */
- env->regs[13] = frameptr;
+ if (stacked_ok && !cpacr_pass) {
+ /*
+ * Take UsageFault if CPACR forbids access. The pseudocode
+ * here does a full CheckCPEnabled() but we know the NSACR
+ * check can never fail as we have already handled that.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...UsageFault with CFSR.NOCP because "
+ "CPACR.CP10 prevents stacking FP regs\n");
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
+ stacked_ok = false;
+ }
+
+ for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
+ uint32_t slo = extract64(dn, 0, 32);
+ uint32_t shi = extract64(dn, 32, 32);
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR */
+ }
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, faddr, slo,
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, faddr + 4, shi,
+ mmu_idx, STACK_NORMAL);
+ }
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, frameptr + 0x60,
+ vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
+ if (cpacr_pass) {
+ for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ }
+ } else {
+ /* Lazy stacking enabled, save necessary info to stack later */
+ v7m_update_fpccr(env, frameptr + 0x20, true);
+ }
+ }
+ }
+
+ /*
+ * If we broke a stack limit then SP was already updated earlier;
+ * otherwise we update SP regardless of whether any of the stack
+ * accesses failed or we took some other kind of fault.
+ */
+ if (!limitviol) {
+ env->regs[13] = frameptr;
+ }
return !stacked_ok;
}
@@ -8231,6 +8712,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
bool rettobase = false;
bool exc_secure = false;
bool return_to_secure;
+ bool ftype;
+ bool restore_s16_s31;
/* If we're not in Handler mode then jumps to magic exception-exit
* addresses don't have magic behaviour. However for the v8M
@@ -8268,6 +8751,16 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
excret);
}
+ ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
+
+ if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
+ "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
+ "if FPU not present\n",
+ excret);
+ ftype = true;
+ }
+
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
/* EXC_RETURN.ES validation check (R_SMFL). We must do this before
* we pick which FAULTMASK to clear.
@@ -8368,6 +8861,30 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
*/
write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+ /*
+ * Clear scratch FP values left in caller saved registers; this
+ * must happen before any kind of tail chaining.
+ */
+ if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+ if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: error during lazy state deactivation\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ } else {
+ /* Clear s0..s15 and FPSCR */
+ int i;
+
+ for (i = 0; i < 16; i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ }
+ }
+
if (sfault) {
env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
@@ -8441,12 +8958,11 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
if (return_to_secure &&
((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
(excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
- uint32_t expected_sig = 0xfefa125b;
uint32_t actual_sig;
pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
- if (pop_ok && expected_sig != actual_sig) {
+ if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
/* Take a SecureFault on the current stack */
env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
@@ -8530,8 +9046,105 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
}
+ if (!ftype) {
+ /* FP present and we need to handle it */
+ if (!return_to_secure &&
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking SecureFault on existing stackframe: "
+ "Secure LSPACT set but exception return is "
+ "not to secure state\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ restore_s16_s31 = return_to_secure &&
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+
+ if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ /* State in FPU is still valid, just clear LSPACT */
+ env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+ } else {
+ int i;
+ uint32_t fpscr;
+ bool cpacr_pass, nsacr_pass;
+
+ cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
+ return_to_priv);
+ nsacr_pass = return_to_secure ||
+ extract32(env->v7m.nsacr, 10, 1);
+
+ if (!cpacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ return_to_secure);
+ env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking UsageFault on existing "
+ "stackframe: CPACR.CP10 prevents unstacking "
+ "FP regs\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ } else if (!nsacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking Secure UsageFault on existing "
+ "stackframe: NSACR.CP10 prevents unstacking "
+ "FP regs\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
+ uint32_t slo, shi;
+ uint64_t dn;
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
+
+ if (i >= 16) {
+ faddr += 8; /* Skip the slot for the FPSCR */
+ }
+
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
+ v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
+
+ if (!pop_ok) {
+ break;
+ }
+
+ dn = (uint64_t)shi << 32 | slo;
+ *aa32_vfp_dreg(env, i / 2) = dn;
+ }
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
+ if (pop_ok) {
+ vfp_set_fpscr(env, fpscr);
+ }
+ if (!pop_ok) {
+ /*
+ * These regs are 0 if security extension present;
+ * otherwise merely UNKNOWN. We zero always.
+ */
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ }
+ }
+ }
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
+ V7M_CONTROL, FPCA, !ftype);
+
/* Commit to consuming the stack frame */
frameptr += 0x20;
+ if (!ftype) {
+ frameptr += 0x48;
+ if (restore_s16_s31) {
+ frameptr += 0x40;
+ }
+ }
/* Undo stack alignment (the SPREALIGN bit indicates that the original
* pre-exception SP was not 8-aligned and we added a padding word to
* align it, so we undo this by ORing in the bit that increases it
@@ -8544,7 +9157,14 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
*frame_sp_p = frameptr;
}
/* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
- xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
+ xpsr_write(env, xpsr, ~(XPSR_SPREALIGN | XPSR_SFPA));
+
+ if (env->v7m.secure) {
+ bool sfpa = xpsr & XPSR_SFPA;
+
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
+ V7M_CONTROL, SFPA, sfpa);
+ }
/* The restored xPSR exception field will be zero if we're
* resuming in Thread mode. If that doesn't match what the
@@ -8667,6 +9287,9 @@ static void arm_log_exception(int idx)
[EXCP_NOCP] = "v7M NOCP UsageFault",
[EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
[EXCP_STKOF] = "v8M STKOF UsageFault",
+ [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
+ [EXCP_LSERR] = "v8M LSERR UsageFault",
+ [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -8785,6 +9408,7 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
", executing it\n", env->regs[15]);
env->regs[14] &= ~1;
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
switch_v7m_security_state(env, true);
xpsr_write(env, 0, XPSR_IT);
env->regs[15] += 4;
@@ -8815,9 +9439,23 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
break;
case EXCP_NOCP:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
- env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
+ {
+ /*
+ * NOCP might be directed to something other than the current
+ * security state if this fault is because of NSACR; we indicate
+ * the target security state using exception.target_el.
+ */
+ int target_secstate;
+
+ if (env->exception.target_el == 3) {
+ target_secstate = M_REG_S;
+ } else {
+ target_secstate = env->v7m.secure;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
+ env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
break;
+ }
case EXCP_INVSTATE:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
@@ -8826,6 +9464,14 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
break;
+ case EXCP_LSERR:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ break;
+ case EXCP_UNALIGNED:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
+ break;
case EXCP_SWI:
/* The PC already points to the next instruction. */
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
@@ -8945,6 +9591,12 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
return;
}
break;
+ case EXCP_LAZYFP:
+ /*
+ * We already pended the specific exception in the NVIC in the
+ * v7m_preserve_fp_state() helper function.
+ */
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
@@ -8952,8 +9604,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
if (arm_feature(env, ARM_FEATURE_V8)) {
lr = R_V7M_EXCRET_RES1_MASK |
- R_V7M_EXCRET_DCRS_MASK |
- R_V7M_EXCRET_FTYPE_MASK;
+ R_V7M_EXCRET_DCRS_MASK;
/* The S bit indicates whether we should return to Secure
* or NonSecure (ie our current state).
* The ES bit indicates whether we're taking this exception
@@ -8968,6 +9619,9 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
if (env->v7m.secure) {
lr |= R_V7M_EXCRET_S_MASK;
}
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
+ }
} else {
lr = R_V7M_EXCRET_RES1_MASK |
R_V7M_EXCRET_S_MASK |
@@ -11993,7 +12647,14 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return xpsr_read(env) & mask;
break;
case 20: /* CONTROL */
- return env->v7m.control[env->v7m.secure];
+ {
+ uint32_t value = env->v7m.control[env->v7m.secure];
+ if (!env->v7m.secure) {
+ /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
+ value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
+ }
+ return value;
+ }
case 0x94: /* CONTROL_NS */
/* We have to handle this here because unprivileged Secure code
* can read the NS CONTROL register.
@@ -12001,7 +12662,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
if (!env->v7m.secure) {
return 0;
}
- return env->v7m.control[M_REG_NS];
+ return env->v7m.control[M_REG_NS] |
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
}
if (el == 0) {
@@ -12107,9 +12769,13 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
*/
uint32_t mask = extract32(maskreg, 8, 4);
uint32_t reg = extract32(maskreg, 0, 8);
+ int cur_el = arm_current_el(env);
- if (arm_current_el(env) == 0 && reg > 7) {
- /* only xPSR sub-fields may be written by unprivileged */
+ if (cur_el == 0 && reg > 7 && reg != 20) {
+ /*
+ * only xPSR sub-fields and CONTROL.SFPA may be written by
+ * unprivileged code
+ */
return;
}
@@ -12168,6 +12834,15 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
}
+ /*
+ * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
+ * RES0 if the FPU is not present, and is stored in the S bank
+ */
+ if (arm_feature(env, ARM_FEATURE_VFP) &&
+ extract32(env->v7m.nsacr, 10, 1)) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
+ }
return;
case 0x98: /* SP_NS */
{
@@ -12270,21 +12945,41 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
env->v7m.faultmask[env->v7m.secure] = val & 1;
break;
case 20: /* CONTROL */
- /* Writing to the SPSEL bit only has an effect if we are in
+ /*
+ * Writing to the SPSEL bit only has an effect if we are in
* thread mode; other bits can be updated by any privileged code.
* write_v7m_control_spsel() deals with updating the SPSEL bit in
* env->v7m.control, so we only need update the others.
* For v7M, we must just ignore explicit writes to SPSEL in handler
* mode; for v8M the write is permitted but will have no effect.
+ * All these bits are writes-ignored from non-privileged code,
+ * except for SFPA.
*/
- if (arm_feature(env, ARM_FEATURE_V8) ||
- !arm_v7m_is_handler_mode(env)) {
+ if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
+ !arm_v7m_is_handler_mode(env))) {
write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
- if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
}
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ /*
+ * SFPA is RAZ/WI from NS or if no FPU.
+ * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
+ * Both are stored in the S bank.
+ */
+ if (env->v7m.secure) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
+ }
+ if (cur_el > 0 &&
+ (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
+ extract32(env->v7m.nsacr, 10, 1))) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
+ }
+ }
break;
default:
bad_reg:
@@ -12751,6 +13446,22 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 0;
}
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* CPACR can cause a NOCP UsageFault taken to current security state */
+ if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
+ return 1;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
+ if (!extract32(env->v7m.nsacr, 10, 1)) {
+ /* FP insns cause a NOCP UsageFault taken to Secure */
+ return 3;
+ }
+ }
+
+ return 0;
+ }
+
/* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
* 0, 2 : trap EL0 and EL1/PL1 accesses
* 1 : trap only EL0 accesses
@@ -12801,8 +13512,8 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 0;
}
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
- bool secstate, bool priv)
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+ bool secstate, bool priv, bool negpri)
{
ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
@@ -12810,7 +13521,7 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
mmu_idx |= ARM_MMU_IDX_M_PRIV;
}
- if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
+ if (negpri) {
mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
}
@@ -12821,6 +13532,14 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
return mmu_idx;
}
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+ bool secstate, bool priv)
+{
+ bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
+
+ return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
+}
+
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
@@ -12938,10 +13657,14 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
- || arm_el_is_aa64(env, 1)) {
+ || arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
}
- flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
+ /* Note that XSCALE_CPAR shares bits with VECSTRIDE */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ flags = FIELD_DP32(flags, TBFLAG_A32,
+ XSCALE_CPAR, env->cp15.c15_cpar);
+ }
}
flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
@@ -12984,6 +13707,32 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
}
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M) &&
+ (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no active
+ * FP context; we must create a new FP context before executing
+ * any FP insn.
+ */
+ flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
+ }
+ }
+
*pflags = flags;
*cs_base = 0;
}
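Note: a standalone rendering of the NEW_FP_CTXT_NEEDED predicate set in cpu_get_tb_cpu_state() above, using the mask values from the cpu.h hunk of this patch (ASPEN is FPCCR bit 31; FPCA and SFPA are CONTROL bits 2 and 3):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FPCCR_ASPEN  (1u << 31)
    #define CONTROL_FPCA (1u << 2)
    #define CONTROL_SFPA (1u << 3)

    static bool new_fp_ctxt_needed(uint32_t fpccr_cur, uint32_t control_s,
                                   bool secure)
    {
        return (fpccr_cur & FPCCR_ASPEN) &&
               (!(control_s & CONTROL_FPCA) ||
                (secure && !(control_s & CONTROL_SFPA)));
    }

    int main(void)
    {
        printf("%d\n", new_fp_ctxt_needed(FPCCR_ASPEN, 0, false));            /* 1 */
        printf("%d\n", new_fp_ctxt_needed(FPCCR_ASPEN, CONTROL_FPCA, false)); /* 0 */
        printf("%d\n", new_fp_ctxt_needed(FPCCR_ASPEN, CONTROL_FPCA, true));  /* 1 */
        return 0;
    }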
diff --git a/target/arm/helper.h b/target/arm/helper.h
index a09566f795..50cb036378 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -69,6 +69,11 @@ DEF_HELPER_2(v7m_blxns, void, env, i32)
DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+DEF_HELPER_1(v7m_preserve_fp_state, void, env)
+
+DEF_HELPER_2(v7m_vlstm, void, env, i32)
+DEF_HELPER_2(v7m_vlldm, void, env, i32)
+
DEF_HELPER_2(v8m_stackcheck, void, env, i32)
DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
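Note: each DEF_HELPER_n() line above declares a helper with n arguments that generated code can call via gen_helper_<name>(); roughly, the new lines correspond to the prototypes below (a simplified rendering, not the actual macro expansion):

    #include <stdint.h>

    typedef struct CPUARMState CPUARMState;   /* opaque stand-in here */

    void helper_v7m_preserve_fp_state(CPUARMState *env);     /* DEF_HELPER_1 */
    void helper_v7m_vlstm(CPUARMState *env, uint32_t fptr);  /* DEF_HELPER_2 */
    void helper_v7m_vlldm(CPUARMState *env, uint32_t fptr);  /* DEF_HELPER_2 */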
diff --git a/target/arm/machine.c b/target/arm/machine.c
index b292549614..09567d4fc6 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -305,6 +305,21 @@ static const VMStateDescription vmstate_m_v8m = {
}
};
+static const VMStateDescription vmstate_m_fp = {
+ .name = "cpu/m/fp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vfp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_m = {
.name = "cpu/m",
.version_id = 4,
@@ -330,6 +345,7 @@ static const VMStateDescription vmstate_m = {
&vmstate_m_scr,
&vmstate_m_other_sp,
&vmstate_m_v8m,
+ &vmstate_m_fp,
NULL
}
};
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 4ea4018e2b..10bc53f91c 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3399,8 +3399,14 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
if (s->fp_excp_el) {
- gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+ s->fp_excp_el);
+ } else {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false),
+ s->fp_excp_el);
+ }
return 0;
}
@@ -3415,6 +3421,73 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
}
}
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* Handle M-profile lazy FP state mechanics */
+
+ /* Trigger lazy-state preservation if necessary */
+ if (s->v7m_lspact) {
+ /*
+ * Lazy state saving affects external memory and also the NVIC,
+ * so we must mark it as an IO operation for icount.
+ */
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_v7m_preserve_fp_state(cpu_env);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+ /*
+ * If the preserve_fp_state helper doesn't throw an exception
+ * then it will clear LSPACT; we don't need to repeat this for
+ * any further FP insns in this TB.
+ */
+ s->v7m_lspact = false;
+ }
+
+ /* Update ownership of FP context: set FPCCR.S to match current state */
+ if (s->v8m_fpccr_s_wrong) {
+ TCGv_i32 tmp;
+
+ tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
+ if (s->v8m_secure) {
+ tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
+ } else {
+ tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
+ }
+ store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
+ /* Don't need to do this for any further FP insns in this TB */
+ s->v8m_fpccr_s_wrong = false;
+ }
+
+ if (s->v7m_new_fp_ctxt_needed) {
+ /*
+ * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
+ * and the FPSCR.
+ */
+ TCGv_i32 control, fpscr;
+ uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
+
+ fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ tcg_temp_free_i32(fpscr);
+ /*
+ * We don't need to arrange to end the TB, because the only
+ * parts of FPSCR which we cache in the TB flags are the VECLEN
+ * and VECSTRIDE, and those don't exist for M-profile.
+ */
+
+ if (s->v8m_secure) {
+ bits |= R_V7M_CONTROL_SFPA_MASK;
+ }
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_ori_i32(control, control, bits);
+ store_cpu_field(control, v7m.control[M_REG_S]);
+ /* Don't need to do this for any further FP insns in this TB */
+ s->v7m_new_fp_ctxt_needed = false;
+ }
+ }
+
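Note: the "create new FP context" sequence above emits TCG ops; their run-time effect is roughly the plain C below (MiniEnv is a stand-in for the relevant CPUARMState fields, not a QEMU type):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CONTROL_FPCA (1u << 2)
    #define CONTROL_SFPA (1u << 3)

    struct MiniEnv {
        uint32_t fpdscr[2];       /* [0] = NS bank, [1] = S bank */
        uint32_t fpscr;
        uint32_t control_s;       /* v7m.control[M_REG_S] */
    };

    static void create_new_fp_context(struct MiniEnv *env, bool secure)
    {
        uint32_t bits = CONTROL_FPCA;

        env->fpscr = env->fpdscr[secure];   /* default FPSCR from FPDSCR */
        if (secure) {
            bits |= CONTROL_SFPA;           /* secure code also sets SFPA */
        }
        env->control_s |= bits;
    }

    int main(void)
    {
        struct MiniEnv env = { .fpdscr = { 0, 0x03000000 } };  /* arbitrary */

        create_new_fp_context(&env, true);
        printf("FPSCR=0x%08x CONTROL_S=0x%x\n",
               (unsigned)env.fpscr, (unsigned)env.control_s);
        return 0;
    }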
if (extract32(insn, 28, 4) == 0xf) {
/*
* Encodings with T=1 (Thumb) or unconditional (ARM):
@@ -3513,12 +3586,27 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
}
}
} else { /* !dp */
+ bool is_sysreg;
+
if ((insn & 0x6f) != 0x00)
return 1;
rn = VFP_SREG_N(insn);
+
+ is_sysreg = extract32(insn, 21, 1);
+
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ /*
+ * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
+ * Writes to R15 are UNPREDICTABLE; we choose to undef.
+ */
+ if (is_sysreg && (rd == 15 || (rn >> 1) != ARM_VFP_FPSCR)) {
+ return 1;
+ }
+ }
+
if (insn & ARM_CP_RW_BIT) {
/* vfp->arm */
- if (insn & (1 << 21)) {
+ if (is_sysreg) {
/* system register */
rn >>= 1;
@@ -3585,7 +3673,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
}
} else {
/* arm->vfp */
- if (insn & (1 << 21)) {
+ if (is_sysreg) {
rn >>= 1;
/* system register */
switch (rn) {
@@ -11707,10 +11795,19 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
case 6: case 7: case 14: case 15:
/* Coprocessor. */
if (arm_dc_feature(s, ARM_FEATURE_M)) {
- /* We don't currently implement M profile FP support,
- * so this entire space should give a NOCP fault, with
- * the exception of the v8M VLLDM and VLSTM insns, which
- * must be NOPs in Secure state and UNDEF in Nonsecure state.
+ /* 0b111x_11xx_xxxx_xxxx_xxxx_xxxx_xxxx_xxxx */
+ if (extract32(insn, 24, 2) == 3) {
+ goto illegal_op; /* op0 = 0b11 : unallocated */
+ }
+
+ /*
+ * Decode VLLDM and VLSTM first: these are nonstandard because:
+ * * if there is no FPU then these insns must NOP in
+ * Secure state and UNDEF in Nonsecure state
+ * * if there is an FPU then these insns do not have
+ * the usual behaviour that disas_vfp_insn() provides of
+ * being controlled by CPACR/NSACR enable bits or the
+ * lazy-stacking logic.
*/
if (arm_dc_feature(s, ARM_FEATURE_V8) &&
(insn & 0xffa00f00) == 0xec200a00) {
@@ -11721,9 +11818,31 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
if (!s->v8m_secure || (insn & 0x0040f0ff)) {
goto illegal_op;
}
- /* Just NOP since FP support is not implemented */
+
+ if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
+ TCGv_i32 fptr = load_reg(s, rn);
+
+ if (extract32(insn, 20, 1)) {
+ gen_helper_v7m_vlldm(cpu_env, fptr);
+ } else {
+ gen_helper_v7m_vlstm(cpu_env, fptr);
+ }
+ tcg_temp_free_i32(fptr);
+
+ /* End the TB, because we have updated FP control bits */
+ s->base.is_jmp = DISAS_UPDATE;
+ }
break;
}
+ if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
+ ((insn >> 8) & 0xe) == 10) {
+ /* FP, and the CPU supports it */
+ if (disas_vfp_insn(s, insn)) {
+ goto illegal_op;
+ }
+ break;
+ }
+
/* All other insns: NOCP */
gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
default_exception_el(s));
@@ -13291,12 +13410,21 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
- dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
- dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
+ dc->vec_stride = 0;
+ } else {
+ dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
+ dc->c15_cpar = 0;
+ }
dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
regime_is_secure(env, dc->mmu_idx);
dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
+ dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
+ dc->v7m_new_fp_ctxt_needed =
+ FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
+ dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
dc->cp_regs = cpu->cp_regs;
dc->features = env->features;
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 984617786d..c2348def0d 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -40,6 +40,9 @@ typedef struct DisasContext {
bool v7m_handler_mode;
bool v8m_secure; /* true if v8M and we're in Secure mode */
bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
+ bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
+ bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
+ bool v7m_lspact; /* FPCCR.LSPACT set */
/* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
* so that top level loop can generate correct syndrome information.
*/
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index 2468fc1629..7a46d99148 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -105,6 +105,14 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
val &= ~FPCR_FZ16;
}
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
+ * and also for the trapped-exception-handling bits IxE.
+ */
+ val &= 0xf7c0009f;
+ }
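Note: the 0xf7c0009f constant above keeps exactly NZCV (bits 31-28), AHP/DN/FZ/RMode (bits 26-22) and the cumulative exception flags IDC/IXC/UFC/OFC/DZC/IOC (bits 7, 4-0), clearing the RES0 bits named in the comment. A standalone check of the mask arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t nzcv       = 0xf0000000;   /* N, Z, C, V */
        uint32_t ctl        = 0x07c00000;   /* AHP, DN, FZ, RMode */
        uint32_t cumulative = 0x0000009f;   /* IDC, IXC, UFC, OFC, DZC, IOC */

        /* cleared: QC (27), STRIDE (21:20), FZ16 (19), LEN (18:16),
         * and the IDE/IXE/UFE/OFE/DZE/IOE trap enables (15, 12-8) */
        printf("mask ok: %d\n", (nzcv | ctl | cumulative) == 0xf7c0009f);
        return 0;
    }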
+
/*
* We don't implement trapped exception handling, so the
* trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)