Diffstat (limited to 'target/arm/ptw.c')
-rw-r--r--  target/arm/ptw.c  128
1 file changed, 128 insertions, 0 deletions
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index d754273fa1..af9ad42028 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -487,6 +487,134 @@ do_fault:
return true;
}
+/*
+ * Translate S2 section/page access permissions to protection flags
+ * @env: CPUARMState
+ * @s2ap: The 2-bit stage2 access permissions (S2AP)
+ * @xn: XN (execute-never) bits
+ * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
+ */
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+{
+    int prot = 0;
+
+    if (s2ap & 1) {
+        prot |= PAGE_READ;
+    }
+    if (s2ap & 2) {
+        prot |= PAGE_WRITE;
+    }
+
+    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
+        switch (xn) {
+        case 0:
+            prot |= PAGE_EXEC;
+            break;
+        case 1:
+            if (s1_is_el0) {
+                prot |= PAGE_EXEC;
+            }
+            break;
+        case 2:
+            break;
+        case 3:
+            if (!s1_is_el0) {
+                prot |= PAGE_EXEC;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    } else {
+        if (!extract32(xn, 1, 1)) {
+            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+                prot |= PAGE_EXEC;
+            }
+        }
+    }
+    return prot;
+}
+
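/*
 * Illustrative sketch (not part of this patch): a minimal standalone model
 * of the S2AP/XN decode in get_S2prot() above, assuming FEAT_TTS2UXN is
 * implemented.  The PAGE_* values and helper name are local stand-ins, not
 * QEMU's definitions.
 */
#include <stdio.h>

enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };

static int model_s2prot(int s2ap, int xn, int s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;             /* S2AP[0]: reads permitted */
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;            /* S2AP[1]: writes permitted */
    }
    /*
     * XN[1:0] with FEAT_TTS2UXN: 0 executable at EL1 and EL0,
     * 1 executable at EL0 only, 2 never executable, 3 EL1 only.
     */
    if (xn == 0 ||
        (xn == 1 && s1_is_el0) ||
        (xn == 3 && !s1_is_el0)) {
        prot |= PAGE_EXEC;
    }
    return prot;
}

int main(void)
{
    /* S2AP = 3 (read/write), XN = 1, walk on behalf of EL0: r/w/x = 7 */
    printf("%d\n", model_s2prot(3, 1, 1));
    return 0;
}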
+/*
+ * Translate section/page access permissions to protection flags
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_aa64: TRUE if AArch64
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @ns: NS (non-secure) bit
+ * @xn: XN (execute-never) bit
+ * @pxn: PXN (privileged execute-never) bit
+ */
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+                      int ap, int ns, int xn, int pxn)
+{
+    bool is_user = regime_is_user(env, mmu_idx);
+    int prot_rw, user_rw;
+    bool have_wxn;
+    int wxn = 0;
+
+    assert(mmu_idx != ARMMMUIdx_Stage2);
+    assert(mmu_idx != ARMMMUIdx_Stage2_S);
+
+    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+    if (is_user) {
+        prot_rw = user_rw;
+    } else {
+        if (user_rw && regime_is_pan(env, mmu_idx)) {
+            /* PAN forbids data accesses but doesn't affect insn fetch */
+            prot_rw = 0;
+        } else {
+            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+        }
+    }
+
+    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
+        return prot_rw;
+    }
+
+    /* TODO have_wxn should be replaced with
+     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
+     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
+     * compatible processors have EL2, which is required for [U]WXN.
+     */
+    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
+
+    if (have_wxn) {
+        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
+    }
+
+    if (is_aa64) {
+        if (regime_has_2_ranges(mmu_idx) && !is_user) {
+            xn = pxn || (user_rw & PAGE_WRITE);
+        }
+    } else if (arm_feature(env, ARM_FEATURE_V7)) {
+        switch (regime_el(env, mmu_idx)) {
+        case 1:
+        case 3:
+            if (is_user) {
+                xn = xn || !(user_rw & PAGE_READ);
+            } else {
+                int uwxn = 0;
+                if (have_wxn) {
+                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
+                }
+                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
+                     (uwxn && (user_rw & PAGE_WRITE));
+            }
+            break;
+        case 2:
+            break;
+        }
+    } else {
+        xn = wxn = 0;
+    }
+
+    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
+        return prot_rw;
+    }
+    return prot_rw | PAGE_EXEC;
+}
+
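/*
 * Illustrative sketch (not part of this patch): for an AArch64 regime with
 * two VA ranges (EL1&0), get_S1prot() above treats a privileged access as
 * execute-never when PXN is set or the page is writable from EL0 -- the
 * "xn = pxn || (user_rw & PAGE_WRITE)" line -- and SCTLR.WXN additionally
 * strips PAGE_EXEC from any page writable in the current regime.  The model
 * below is a standalone simplification with hypothetical helper names;
 * PAN, SCR_EL3.SIF and the AArch32 cases are deliberately omitted.
 */
#include <stdio.h>

enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };

/* Simple AP[2:1] decode: bit 0 enables EL0 access, bit 1 makes it read-only */
static int model_ap_to_rw(int ap, int is_user)
{
    if (is_user && !(ap & 1)) {
        return 0;
    }
    return (ap & 2) ? PAGE_READ : (PAGE_READ | PAGE_WRITE);
}

static int model_s1prot_aa64(int ap, int xn, int pxn, int is_user, int wxn)
{
    int user_rw = model_ap_to_rw(ap, 1);
    int prot_rw = is_user ? user_rw : model_ap_to_rw(ap, 0);

    if (!is_user) {
        /*
         * For a privileged access the descriptor's UXN bit (passed in as
         * xn) does not apply; execute-never is PXN plus the implicit rule
         * that pages writable from EL0 never execute at EL1.
         */
        xn = pxn || (user_rw & PAGE_WRITE);
    }
    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

int main(void)
{
    /* AP = 1 (rw at any EL), no XN/PXN, privileged, WXN clear: prints 3,
     * i.e. read/write but no execute, because the page is EL0-writable. */
    printf("%d\n", model_s1prot_aa64(1, 0, 0, 0, 0));
    return 0;
}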
/**
* get_phys_addr_lpae: perform one stage of page table walk, LPAE format
*