Diffstat (limited to 'target/arm/ptw.c')
-rw-r--r--  target/arm/ptw.c |  72
1 file changed, 33 insertions(+), 39 deletions(-)
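
As background for the hunks below: the 2-bit "simple" access-permission field AP[2:1] selects read-only via AP[2] and grants unprivileged access via AP[1]. The following is a minimal, self-contained sketch of that decode, written from the architectural description rather than copied from ptw.c, where simple_ap_to_rw_prot_is_user() plays this role:

#include <assert.h>

#define PAGE_READ  0x1
#define PAGE_WRITE 0x2

/*
 * Decode the 2-bit "simple" access-permission field AP[2:1] into
 * read/write rights for the unprivileged (user) and privileged views.
 * Sketch only; QEMU's simple_ap_to_rw_prot_is_user() is the real thing.
 */
static int simple_ap_decode(int ap, int is_user)
{
    switch (ap & 3) {
    case 0: /* privileged read/write, no unprivileged access */
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1: /* read/write at any privilege level */
        return PAGE_READ | PAGE_WRITE;
    case 2: /* privileged read-only, no unprivileged access */
        return is_user ? 0 : PAGE_READ;
    case 3: /* read-only at any privilege level */
        return PAGE_READ;
    default:
        assert(0);
        return 0;
    }
}

In the patch, get_phys_addr_lpae() and the SCTLR.AFE (simplified access permissions) path of get_phys_addr_v6() perform this translation twice, once for the unprivileged view and once for the privileged view, and pass both results to get_S1prot() as user_rw and prot_rw.
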
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 9849949508..64bb6878a4 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -85,6 +85,10 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
GetPhysAddrResult *result,
ARMMMUFaultInfo *fi);
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+ int user_rw, int prot_rw, int xn, int pxn,
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa);
+
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
[0] = 32,
@@ -1148,7 +1152,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
hwaddr phys_addr;
uint32_t dacr;
bool ns;
- int user_prot;
+ ARMSecuritySpace out_space;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
@@ -1240,16 +1244,19 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
g_assert_not_reached();
}
}
+ out_space = ptw->in_space;
+ if (ns) {
+ /*
+ * The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the output space will already be non-secure.
+ */
+ out_space = ARMSS_NonSecure;
+ }
if (domain_prot == 3) {
result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
} else {
- if (pxn && !regime_is_user(env, mmu_idx)) {
- xn = 1;
- }
- if (xn && access_type == MMU_INST_FETCH) {
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
+ int user_rw, prot_rw;
if (arm_feature(env, ARM_FEATURE_V6K) &&
(regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
@@ -1259,37 +1266,23 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
fi->type = ARMFault_AccessFlag;
goto do_fault;
}
- result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
- user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
+ prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+ user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
} else {
- result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
- user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
- }
- if (result->f.prot && !xn) {
- result->f.prot |= PAGE_EXEC;
+ prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
}
+
+ result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw,
+ xn, pxn, result->f.attrs.space, out_space);
if (!(result->f.prot & (1 << access_type))) {
/* Access permission fault. */
fi->type = ARMFault_Permission;
goto do_fault;
}
- if (regime_is_pan(env, mmu_idx) &&
- !regime_is_user(env, mmu_idx) &&
- user_prot &&
- access_type != MMU_INST_FETCH) {
- /* Privileged Access Never fault */
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
- }
- if (ns) {
- /* The NS bit will (as required by the architecture) have no effect if
- * the CPU doesn't support TZ or this is a non-secure translation
- * regime, because the attribute will already be non-secure.
- */
- result->f.attrs.secure = false;
- result->f.attrs.space = ARMSS_NonSecure;
}
+ result->f.attrs.space = out_space;
+ result->f.attrs.secure = arm_space_is_secure(out_space);
result->f.phys_addr = phys_addr;
return false;
do_fault:
@@ -1357,25 +1350,24 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
* @env: CPUARMState
* @mmu_idx: MMU index indicating required translation regime
* @is_aa64: TRUE if AArch64
- * @ap: The 2-bit simple AP (AP[2:1])
+ * @user_rw: Translated AP for user access
+ * @prot_rw: Translated AP for privileged access
* @xn: XN (execute-never) bit
* @pxn: PXN (privileged execute-never) bit
* @in_pa: The original input pa space
* @out_pa: The output pa space, modified by NSTable, NS, and NSE
*/
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
- int ap, int xn, int pxn,
+ int user_rw, int prot_rw, int xn, int pxn,
ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
ARMCPU *cpu = env_archcpu(env);
bool is_user = regime_is_user(env, mmu_idx);
- int prot_rw, user_rw;
bool have_wxn;
int wxn = 0;
assert(!regime_is_stage2(mmu_idx));
- user_rw = simple_ap_to_rw_prot_is_user(ap, true);
if (is_user) {
prot_rw = user_rw;
} else {
@@ -1393,8 +1385,6 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
regime_is_pan(env, mmu_idx) &&
(regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
prot_rw = 0;
- } else {
- prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
}
}
@@ -2044,6 +2034,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
int nse, ns = extract32(attrs, 5, 1);
uint8_t attrindx;
uint64_t mair;
+ int user_rw, prot_rw;
switch (out_space) {
case ARMSS_Root:
@@ -2110,12 +2101,15 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
xn = 0;
ap &= ~1;
}
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
/*
* Note that we modified ptw->in_space earlier for NSTable, but
* result->f.attrs retains a copy of the original security space.
*/
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
- result->f.attrs.space, out_space);
+ result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
+ xn, pxn, result->f.attrs.space, out_space);
/* Index into MAIR registers for cache attributes */
attrindx = extract32(attrs, 2, 3);
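
For orientation, here is a deliberately simplified, self-contained sketch of the combination step that get_S1prot() performs on the user_rw/prot_rw pair together with xn/pxn once its callers have done the AP translation. It ignores WXN/UWXN, SCTLR.EPAN, PAN's interaction with instruction fetch, and the in_pa/out_pa security-space rules that the real function also applies; the "execute requires read" simplification follows the architectural rules as understood here, not the exact QEMU code:

#define PAGE_READ  0x1
#define PAGE_WRITE 0x2
#define PAGE_EXEC  0x4

/*
 * Simplified sketch of stage-1 permission combination after the caller
 * has already translated the AP field into user_rw/prot_rw.
 * Real get_S1prot() additionally handles WXN/UWXN, SCTLR.EPAN and the
 * Realm/Root security-space checks.
 */
static int s1prot_sketch(int is_user, int pan,
                         int user_rw, int prot_rw, int xn, int pxn)
{
    int prot;

    if (is_user) {
        prot = user_rw;
    } else if (pan && user_rw) {
        /* PAN: privileged data access to user-accessible memory is denied */
        prot = 0;
    } else {
        prot = prot_rw;
    }

    /* Execute needs read permission and must not be blocked by XN/PXN */
    if (xn || (!is_user && pxn) || !(prot & PAGE_READ)) {
        return prot;
    }
    return prot | PAGE_EXEC;
}

This is the logic the old get_phys_addr_v6() code open-coded (the removed XN/PXN and PAN checks in the hunks above) and now gets for free by calling get_S1prot() with the new user_rw/prot_rw interface.
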