Diffstat (limited to 'target/arm/cpu.h')
 target/arm/cpu.h | 50 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a12859fc53..9a3fd59562 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2772,14 +2772,19 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* + NonSecure EL1 & 0 stage 2
* + NonSecure EL2
* + NonSecure EL2 & 0 (ARMv8.1-VHE)
- * + Secure EL1 & 0
- * + Secure EL3
+ * + Secure EL1 & 0 stage 1
+ * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
+ * + Secure EL2 (FEAT_SEL2)
+ * + Secure EL2 & 0 (FEAT_SEL2)
+ * + Realm EL1 & 0 stage 1 (FEAT_RME)
+ * + Realm EL1 & 0 stage 2 (FEAT_RME)
+ * + Realm EL2 (FEAT_RME)
+ * + EL3
* If EL3 is 32-bit:
* + NonSecure PL1 & 0 stage 1
* + NonSecure PL1 & 0 stage 2
* + NonSecure PL2
- * + Secure PL0
- * + Secure PL1
+ * + Secure PL1 & 0
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
*
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
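
Aside: the regime list above spans four security spaces (Secure, NonSecure,
Realm, and with FEAT_RME the Root space that EL3 occupies). QEMU models these
with an ARMSecuritySpace enumeration elsewhere in this header; the sketch
below only illustrates its shape, and the values and comments are ours rather
than the real definition:

    /* Illustrative sketch: the security spaces behind the regimes above. */
    typedef enum ARMSecuritySpaceSketch {
        SS_Secure,      /* Secure EL1&0 and Secure EL2 regimes (FEAT_SEL2) */
        SS_NonSecure,   /* NonSecure EL1&0 and EL2 regimes */
        SS_Root,        /* EL3, when FEAT_RME is implemented */
        SS_Realm,       /* Realm EL1&0 and EL2 regimes (FEAT_RME) */
    } ARMSecuritySpaceSketch;
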
@@ -2797,37 +2802,42 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* The only use of stage 2 translations is either as part of an s1+2
* lookup or when loading the descriptors during a stage 1 page table walk,
* and in both those cases we don't use the TLB.
- * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
- * translation regimes, because they map reasonably well to each other
- * and they can't both be active at the same time.
- * 5. we want to be able to use the TLB for accesses done as part of a
+ * 4. we want to be able to use the TLB for accesses done as part of a
* stage1 page table walk, rather than having to walk the stage2 page
* table over and over.
- * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ * 5. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
* Never (PAN) bit within PSTATE.
- * 7. we fold together the secure and non-secure regimes for A-profile,
+ * 6. we fold together most secure and non-secure regimes for A-profile,
* because there are no banked system registers for aarch64, so the
* process of switching between secure and non-secure is
* already heavyweight.
+ * 7. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
+ * because both are in use simultaneously for Secure EL2.
*
* This gives us the following list of cases:
*
- * EL0 EL1&0 stage 1+2 (aka NS PL0)
- * EL1 EL1&0 stage 1+2 (aka NS PL1)
- * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL1&0 stage 1+2 (or AArch32 PL0 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 (or AArch32 PL1 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 +PAN (or AArch32 PL1 PL1&0 stage 1+2 +PAN)
* EL0 EL2&0
* EL2 EL2&0
* EL2 EL2&0 +PAN
* EL2 (aka NS PL2)
- * EL3 (aka S PL1)
- * Physical (NS & S)
- * Stage2 (NS & S)
+ * EL3 (not used when EL3 is AArch32)
+ * Stage2 Secure
+ * Stage2 NonSecure
+ * plus one TLB per Physical address space: S, NS, Realm, Root
*
- * for a total of 12 different mmu_idx.
+ * for a total of 14 different mmu_idx.
+ *
+ * Note that when EL3 is AArch32, the usage is potentially confusing
+ * because the MMU indexes are named for their AArch64 use, so code
+ * using ARMMMUIdx_E10_1 might be running at EL3, not EL1. This is
+ * because when EL3 is AArch32, Secure PL1 is EL3, not EL1.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
- * EL2 if we ever model a Cortex-R52).
+ * EL2 for cores like the Cortex-R52).
*
* M profile CPUs are rather different as they do not have a true MMU.
* They have the following different MMU indexes:
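
Aside: one way to see how the 14 cases above line up with names used in this
header (the comment itself refers to ARMMMUIdx_E10_1) is the sketch below.
The real ARMMMUIdx enumerators carry an ARMMMUIdx_ prefix and OR flag bits
into their values, so this is a simplified illustration, not the actual
definition:

    /* Simplified sketch of the 14 A-profile MMU indexes listed above. */
    typedef enum ARMMMUIdxSketch {
        E10_0,        /* EL0, EL1&0 stage 1+2 */
        E10_1,        /* EL1, EL1&0 stage 1+2 */
        E10_1_PAN,    /* EL1, EL1&0 stage 1+2, +PAN */
        E20_0,        /* EL0, EL2&0 */
        E20_2,        /* EL2, EL2&0 */
        E20_2_PAN,    /* EL2, EL2&0, +PAN */
        E2,           /* EL2 */
        E3,           /* EL3 (not used when EL3 is AArch32) */
        Stage2,       /* NonSecure stage 2 */
        Stage2_S,     /* Secure stage 2 */
        Phys_NS,      /* NonSecure physical address space */
        Phys_S,       /* Secure physical address space */
        Phys_Root,    /* Root physical address space */
        Phys_Realm,   /* Realm physical address space */
    } ARMMMUIdxSketch;
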
@@ -3117,6 +3127,10 @@ FIELD(TBFLAG_A32, NS, 10, 1)
* This requires an SME trap from AArch32 mode when using NEON.
*/
FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
+/*
+ * Indicates whether we are in the Secure PL1&0 translation regime
+ */
+FIELD(TBFLAG_A32, S_PL1_0, 12, 1)

/*
* Bit usage when in AArch32 state, for M-profile only.
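
Aside: FIELD() comes from QEMU's hw/registerfields.h and generates
shift/length/mask constants for a named bitfield. Below is a self-contained
sketch of roughly what the new declaration provides, plus a hypothetical
helper (tbflags_in_secure_pl1_0 is our name, not a real QEMU function)
showing how such a flag would be tested:

    #include <stdbool.h>
    #include <stdint.h>

    /* Roughly what FIELD(TBFLAG_A32, S_PL1_0, 12, 1) provides
     * (the real macro generates enum constants, not #defines): */
    #define R_TBFLAG_A32_S_PL1_0_SHIFT 12
    #define R_TBFLAG_A32_S_PL1_0_MASK  (1u << R_TBFLAG_A32_S_PL1_0_SHIFT)

    /* Hypothetical helper: does the flag word say we are in the
     * Secure PL1&0 translation regime? */
    static inline bool tbflags_in_secure_pl1_0(uint32_t flags)
    {
        return (flags & R_TBFLAG_A32_S_PL1_0_MASK) != 0;
    }
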