author     Edgar E. Iglesias <edgar.iglesias@xilinx.com>   2015-10-26 14:02:07 +0100
committer  Peter Maydell <peter.maydell@linaro.org>        2015-10-27 15:59:47 +0000
commit     9b539263faa5c1b7fce2551092b5c7b6eea92081
tree       0ba1b8617611f9c4ac60dfc946cf5e24a3fab2ed /target-arm
parent     d759a457a144844bff259aafda093b24e92c116d
target-arm: Add support for S1 + S2 MMU translations
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Message-id: 1445864527-14520-15-git-send-email-edgar.iglesias@gmail.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/helper.c     38
-rw-r--r--  target-arm/op_helper.c   1
2 files changed, 32 insertions, 7 deletions
diff --git a/target-arm/helper.c b/target-arm/helper.c
index eb9a00d14e..1966f9c045 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -7196,14 +7196,38 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
                           ARMMMUFaultInfo *fi)
 {
     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
-        /* TODO: when we support EL2 we should here call ourselves recursively
-         * to do the stage 1 and then stage 2 translations. The arm_ld*_ptw
-         * functions will also need changing to perform ARMMMUIdx_S2NS loads
-         * rather than direct physical memory loads when appropriate.
-         * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+        /* Call ourselves recursively to do the stage 1 and then stage 2
+         * translations.
          */
-        assert(!arm_feature(env, ARM_FEATURE_EL2));
-        mmu_idx += ARMMMUIdx_S1NSE0;
+        if (arm_feature(env, ARM_FEATURE_EL2)) {
+            hwaddr ipa;
+            int s2_prot;
+            int ret;
+
+            ret = get_phys_addr(env, address, access_type,
+                                mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
+                                prot, page_size, fsr, fi);
+
+            /* If S1 fails or S2 is disabled, return early. */
+            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
+                *phys_ptr = ipa;
+                return ret;
+            }
+
+            /* S1 is done. Now do S2 translation. */
+            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
+                                     phys_ptr, attrs, &s2_prot,
+                                     page_size, fsr, fi);
+            fi->s2addr = ipa;
+            /* Combine the S1 and S2 perms. */
+            *prot &= s2_prot;
+            return ret;
+        } else {
+            /*
+             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+             */
+            mmu_idx += ARMMMUIdx_S1NSE0;
+        }
     }
 
     /* The page table entries may downgrade secure to non-secure, but
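Note on the hunk above: for the combined S12NSE* regimes the new path runs the
stage 1 walk to produce an intermediate physical address (IPA), then, if stage 2
is enabled, walks stage 2 on that IPA and ANDs the two permission masks together.
A minimal standalone C sketch of that shape follows; the helper names
(stage1_translate, stage2_translate) and the fake fixed-offset mappings are
hypothetical illustrations, not QEMU APIs.

/* Sketch only: S1 -> IPA, then S2 -> PA, permissions intersected. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { PROT_R = 1, PROT_W = 2, PROT_X = 4 };

/* Stage 1: guest virtual address -> intermediate physical address (IPA). */
static bool stage1_translate(uint64_t va, uint64_t *ipa, int *prot)
{
    *ipa = va + 0x10000;              /* pretend S1 mapping */
    *prot = PROT_R | PROT_W | PROT_X;
    return true;
}

/* Stage 2: IPA -> physical address, controlled by the hypervisor. */
static bool stage2_translate(uint64_t ipa, uint64_t *pa, int *prot)
{
    *pa = ipa + 0x40000000;           /* pretend S2 mapping */
    *prot = PROT_R | PROT_X;          /* S2 denies writes in this example */
    return true;
}

static bool translate(uint64_t va, bool s2_enabled, uint64_t *pa, int *prot)
{
    uint64_t ipa;
    int s2_prot;

    if (!stage1_translate(va, &ipa, prot)) {
        return false;                 /* S1 fault */
    }
    if (!s2_enabled) {
        *pa = ipa;                    /* S2 disabled: the IPA is the final PA */
        return true;
    }
    if (!stage2_translate(ipa, pa, &s2_prot)) {
        return false;                 /* S2 fault; the faulting address is the IPA */
    }
    *prot &= s2_prot;                 /* effective permissions: S1 AND S2 */
    return true;
}

int main(void)
{
    uint64_t pa;
    int prot;

    if (translate(0x1000, true, &pa, &prot)) {
        printf("PA=0x%" PRIx64 " prot=%d\n", pa, prot); /* PA=0x40011000 prot=5 */
    }
    return 0;
}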
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 333078a169..a4c4ebf9cd 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -101,6 +101,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
         target_el = exception_target_el(env);
         if (fi.stage2) {
             target_el = 2;
+            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
         }
         same_el = arm_current_el(env) == target_el;
         /* AArch64 syndrome does not have an LPAE bit */