-rw-r--r--  target-arm/cpu.h           | 115
-rw-r--r--  target-arm/helper.c        |   3
-rw-r--r--  target-arm/translate-a64.c |   5
-rw-r--r--  target-arm/translate.c     |   5
-rw-r--r--  target-arm/translate.h     |   3
5 files changed, 102 insertions(+), 29 deletions(-)
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index d6e0d4debd..92c6ac6264 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -98,7 +98,7 @@ typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
 
 struct arm_boot_info;
 
-#define NB_MMU_MODES 4
+#define NB_MMU_MODES 7
 
 /* We currently assume float and double are IEEE single and double
    precision respectively.
@@ -1579,13 +1579,92 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
 #define cpu_signal_handler cpu_arm_signal_handler
 #define cpu_list arm_cpu_list
 
-/* MMU modes definitions */
+/* ARM has the following "translation regimes" (as the ARM ARM calls them):
+ *
+ * If EL3 is 64-bit:
+ *  + NonSecure EL1 & 0 stage 1
+ *  + NonSecure EL1 & 0 stage 2
+ *  + NonSecure EL2
+ *  + Secure EL1 & EL0
+ *  + Secure EL3
+ * If EL3 is 32-bit:
+ *  + NonSecure PL1 & 0 stage 1
+ *  + NonSecure PL1 & 0 stage 2
+ *  + NonSecure PL2
+ *  + Secure PL0 & PL1
+ * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
+ *
+ * For QEMU, an mmu_idx is not quite the same as a translation regime because:
+ *  1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
+ *     may differ in access permissions even if the VA->PA map is the same
+ *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
+ *     translation, which means that we have one mmu_idx that deals with two
+ *     concatenated translation regimes [this sort of combined s1+2 TLB is
+ *     architecturally permitted]
+ *  3. we don't need to allocate an mmu_idx to translations that we won't be
+ *     handling via the TLB. The only way to do a stage 1 translation without
+ *     the immediate stage 2 translation is via the ATS or AT system insns,
+ *     which can be slow-pathed and always do a page table walk.
+ *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ *     translation regimes, because they map reasonably well to each other
+ *     and they can't both be active at the same time.
+ * This gives us the following list of mmu_idx values:
+ *
+ * NS EL0 (aka NS PL0) stage 1+2
+ * NS EL1 (aka NS PL1) stage 1+2
+ * NS EL2 (aka NS PL2)
+ * S EL3 (aka S PL1)
+ * S EL0 (aka S PL0)
+ * S EL1 (not used if EL3 is 32 bit)
+ * NS EL0+1 stage 2
+ *
+ * (The last of these is an mmu_idx because we want to be able to use the TLB
+ * for the accesses done as part of a stage 1 page table walk, rather than
+ * having to walk the stage 2 page table over and over.)
+ *
+ * Our enumeration includes at the end some entries which are not "true"
+ * mmu_idx values in that they don't have corresponding TLBs and are only
+ * valid for doing slow path page table walks.
+ *
+ * The constant names here are patterned after the general style of the names
+ * of the AT/ATS operations.
+ * The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ */
+typedef enum ARMMMUIdx {
+    ARMMMUIdx_S12NSE0 = 0,
+    ARMMMUIdx_S12NSE1 = 1,
+    ARMMMUIdx_S1E2 = 2,
+    ARMMMUIdx_S1E3 = 3,
+    ARMMMUIdx_S1SE0 = 4,
+    ARMMMUIdx_S1SE1 = 5,
+    ARMMMUIdx_S2NS = 6,
+    /* Indexes below here don't have TLBs and are used only for AT system
+     * instructions or for the first stage of an S12 page table walk.
+     */
+    ARMMMUIdx_S1NSE0 = 7,
+    ARMMMUIdx_S1NSE1 = 8,
+} ARMMMUIdx;
+
 #define MMU_MODE0_SUFFIX _user
 #define MMU_MODE1_SUFFIX _kernel
 #define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUARMState *env)
+
+/* Return the exception level we're running at if this is our mmu_idx */
+static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
 {
-    return arm_current_el(env);
+    assert(mmu_idx < ARMMMUIdx_S2NS);
+    return mmu_idx & 3;
+}
+
+/* Determine the current mmu_idx to use for normal loads/stores */
+static inline int cpu_mmu_index(CPUARMState *env)
+{
+    int el = arm_current_el(env);
+
+    if (el < 2 && arm_is_secure_below_el3(env)) {
+        return ARMMMUIdx_S1SE0 + el;
+    }
+    return el;
 }
 
 /* Return the Exception Level targeted by debug exceptions;
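The enum layout above is what makes arm_mmu_idx_to_el() a two-bit mask: for every TLB-backed index the low bits are the exception level, and the Secure EL0/EL1 variants sit exactly ARMMMUIdx_S1SE0 (4) higher, so the same decode still works. cpu_mmu_index() exploits the same arrangement when it returns ARMMMUIdx_S1SE0 + el. A standalone sketch that mirrors this logic (the index values are copied from the hunk above; the test harness itself is illustrative, not QEMU code):

#include <assert.h>
#include <stdio.h>

/* Index values copied from the ARMMMUIdx enum in the patch. */
enum {
    S12NSE0 = 0, S12NSE1 = 1, S1E2 = 2, S1E3 = 3,
    S1SE0 = 4, S1SE1 = 5, S2NS = 6,
};

/* Mirrors arm_mmu_idx_to_el(): only valid for TLB-backed indexes. */
static int mmu_idx_to_el(int mmu_idx)
{
    assert(mmu_idx < S2NS);
    return mmu_idx & 3;
}

int main(void)
{
    /* Secure and NonSecure EL0/EL1 indexes decode to the same EL... */
    assert(mmu_idx_to_el(S12NSE0) == 0 && mmu_idx_to_el(S1SE0) == 0);
    assert(mmu_idx_to_el(S12NSE1) == 1 && mmu_idx_to_el(S1SE1) == 1);
    /* ...and the EL2/EL3 indexes are literally their exception level. */
    assert(mmu_idx_to_el(S1E2) == 2 && mmu_idx_to_el(S1E3) == 3);
    printf("mmu_idx -> EL mapping holds\n");
    return 0;
}

The point of the "carefully arranged" values is visible here: the EL lookup never needs a table or a branch.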
@@ -1652,9 +1731,13 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 
 /* Bit usage in the TB flags field: bit 31 indicates whether we are
  * in 32 or 64 bit mode. The meaning of the other bits depends on that.
+ * We put flags which are shared between 32 and 64 bit mode at the top
+ * of the word, and flags which apply to only one mode at the bottom.
  */
 #define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
 #define ARM_TBFLAG_AARCH64_STATE_MASK  (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX_SHIFT 28
+#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
 
 /* Bit usage when in AArch32 state: */
 #define ARM_TBFLAG_THUMB_SHIFT      0
@@ -1663,8 +1746,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 #define ARM_TBFLAG_VECLEN_MASK      (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
 #define ARM_TBFLAG_VECSTRIDE_SHIFT  4
 #define ARM_TBFLAG_VECSTRIDE_MASK   (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV_SHIFT       6
-#define ARM_TBFLAG_PRIV_MASK        (1 << ARM_TBFLAG_PRIV_SHIFT)
 #define ARM_TBFLAG_VFPEN_SHIFT      7
 #define ARM_TBFLAG_VFPEN_MASK       (1 << ARM_TBFLAG_VFPEN_SHIFT)
 #define ARM_TBFLAG_CONDEXEC_SHIFT   8
@@ -1690,8 +1771,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 #define ARM_TBFLAG_NS_MASK          (1 << ARM_TBFLAG_NS_SHIFT)
 
 /* Bit usage when in AArch64 state */
-#define ARM_TBFLAG_AA64_EL_SHIFT    0
-#define ARM_TBFLAG_AA64_EL_MASK     (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
 #define ARM_TBFLAG_AA64_FPEN_SHIFT  2
 #define ARM_TBFLAG_AA64_FPEN_MASK   (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
 #define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3
@@ -1702,14 +1781,14 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 /* some convenience accessor macros */
 #define ARM_TBFLAG_AARCH64_STATE(F) \
     (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX(F) \
+    (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
 #define ARM_TBFLAG_THUMB(F) \
     (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
 #define ARM_TBFLAG_VECLEN(F) \
     (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
 #define ARM_TBFLAG_VECSTRIDE(F) \
     (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
-#define ARM_TBFLAG_PRIV(F) \
-    (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT)
 #define ARM_TBFLAG_VFPEN(F) \
     (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
 #define ARM_TBFLAG_CONDEXEC(F) \
@@ -1724,8 +1803,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
     (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
 #define ARM_TBFLAG_XSCALE_CPAR(F) \
     (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
-#define ARM_TBFLAG_AA64_EL(F) \
-    (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
 #define ARM_TBFLAG_AA64_FPEN(F) \
     (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
 #define ARM_TBFLAG_AA64_SS_ACTIVE(F) \
@@ -1749,8 +1826,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
 
     if (is_a64(env)) {
         *pc = env->pc;
-        *flags = ARM_TBFLAG_AARCH64_STATE_MASK
-            | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
         if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
             *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
         }
@@ -1768,21 +1844,12 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
             }
         }
     } else {
-        int privmode;
         *pc = env->regs[15];
         *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
             | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
             | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
             | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
             | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
-        if (arm_feature(env, ARM_FEATURE_M)) {
-            privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1));
-        } else {
-            privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR;
-        }
-        if (privmode) {
-            *flags |= ARM_TBFLAG_PRIV_MASK;
-        }
         if (!(access_secure_reg(env))) {
             *flags |= ARM_TBFLAG_NS_MASK;
         }
@@ -1810,6 +1877,8 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                    << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }
 
+    *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+
    *cs_base = 0;
 }
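With ARM_TBFLAG_PRIV and ARM_TBFLAG_AA64_EL gone, the 3-bit MMUIDX field at bits 30:28 becomes the one place the translation regime is recorded in tb->flags, shared by the 32-bit and 64-bit decoders. A minimal round-trip check of the packing, compilable on its own; the shift, mask, and accessor definitions are copied from the hunks above, while the main() harness is illustrative:

#include <assert.h>
#include <stdint.h>

/* Field definitions copied from the patch. */
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK  (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX_SHIFT 28
#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_MMUIDX(F) \
    (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)

int main(void)
{
    /* Pack an AArch64 TB running at mmu_idx 5 (ARMMMUIdx_S1SE1)... */
    uint32_t flags = ARM_TBFLAG_AARCH64_STATE_MASK;
    flags |= 5 << ARM_TBFLAG_MMUIDX_SHIFT;

    /* ...and recover the index at translation time, as the front ends do. */
    assert(ARM_TBFLAG_MMUIDX(flags) == 5);
    /* The field occupies bits 30:28, clear of the AArch64 state bit 31. */
    assert((ARM_TBFLAG_MMUIDX_MASK & ARM_TBFLAG_AARCH64_STATE_MASK) == 0);
    return 0;
}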
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 3b054ad243..85d02af6d9 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -5179,7 +5179,8 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
     uint32_t syn;
     bool same_el = (arm_current_el(env) != 0);
 
-    is_user = mmu_idx == MMU_USER_IDX;
+    /* TODO: pass the translation regime to get_phys_addr */
+    is_user = (arm_mmu_idx_to_el(mmu_idx) == 0);
     ret = get_phys_addr(env, address, access_type, is_user,
                         &phys_addr, &prot, &page_size);
     if (ret == 0) {
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index dac2f631cf..96f14ffc7e 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -10922,14 +10922,15 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
     dc->bswap_code = 0;
     dc->condexec_mask = 0;
     dc->condexec_cond = 0;
+    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
 #if !defined(CONFIG_USER_ONLY)
-    dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
+    dc->user = (dc->current_el == 0);
 #endif
     dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = cpu->cp_regs;
-    dc->current_el = arm_current_el(env);
     dc->features = env->features;
 
     /* Single step state. The code-generation logic here is:
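Note the direction of the data flow in both translators: dc->mmu_idx comes from tb->flags, dc->current_el is derived from it, and dc->user from that, with no reads of live CPU state. That is what lets a cached TB be reused safely whenever the flags match. A condensed, hypothetical model of the derivation chain (DisasCtx, init_ctx, and the local MMUIDX macro are inventions for illustration; the real code uses DisasContext and the ARM_TBFLAG_* accessors):

#include <assert.h>
#include <stdint.h>

/* Hypothetical condensed model of the DisasContext fields involved. */
typedef struct {
    int mmu_idx;    /* MMU index to use for normal loads/stores */
    int current_el; /* derived from mmu_idx, not from live CPU state */
    int user;       /* nonzero when translating for EL0 */
} DisasCtx;

#define MMUIDX_SHIFT 28                        /* as in cpu.h above */
#define MMUIDX(F) (((F) >> MMUIDX_SHIFT) & 0x7)

static void init_ctx(DisasCtx *dc, uint32_t tb_flags)
{
    /* Everything below depends only on tb_flags, so two TBs with
     * identical flags are interchangeable by construction. */
    dc->mmu_idx = MMUIDX(tb_flags);
    dc->current_el = dc->mmu_idx & 3;          /* arm_mmu_idx_to_el() */
    dc->user = (dc->current_el == 0);
}

int main(void)
{
    DisasCtx dc;
    /* 4 is ARMMMUIdx_S1SE0: Secure EL0. */
    init_ctx(&dc, (uint32_t)4 << MMUIDX_SHIFT);
    assert(dc.mmu_idx == 4 && dc.current_el == 0 && dc.user);
    return 0;
}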
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 2c1c2a7085..790e92cc59 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -11036,8 +11036,10 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
     dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
     dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
     dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
 #if !defined(CONFIG_USER_ONLY)
-    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+    dc->user = (dc->current_el == 0);
 #endif
     dc->ns = ARM_TBFLAG_NS(tb->flags);
     dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
@@ -11046,7 +11048,6 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
     dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
     dc->cp_regs = cpu->cp_regs;
-    dc->current_el = arm_current_el(env);
     dc->features = env->features;
 
     /* Single step state. The code-generation logic here is:
diff --git a/target-arm/translate.h b/target-arm/translate.h
index f6ee7892ba..a1eb5b5347 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -20,6 +20,7 @@ typedef struct DisasContext {
 #if !defined(CONFIG_USER_ONLY)
     int user;
 #endif
+    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
     bool ns; /* Use non-secure CPREG bank on access */
     bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
     bool vfp_enabled; /* FP enabled via FPSCR.EN */
@@ -69,7 +70,7 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
 
 static inline int get_mem_index(DisasContext *s)
 {
-    return s->current_el;
+    return s->mmu_idx;
 }
 
 /* target-specific extra values for is_jmp */
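The one-line change to get_mem_index() is the payoff of the whole patch: the index handed to the generated load/store ops now names a translation regime rather than a bare exception level. A small sketch of why returning s->current_el stopped being enough once the Secure and NonSecure regimes were modelled separately (the index values are copied from the ARMMMUIdx enum in cpu.h above; the harness is illustrative):

#include <assert.h>

/* Values copied from the ARMMMUIdx enum in target-arm/cpu.h above. */
enum { S12NSE1 = 1, S1SE1 = 5 };

int main(void)
{
    /* Secure and NonSecure EL1 run at the same exception level... */
    assert((S12NSE1 & 3) == 1 && (S1SE1 & 3) == 1);
    /* ...so keying the softmmu TLB on the EL would have let the two
     * regimes alias each other's entries; keying it on the mmu_idx
     * keeps their translations apart. */
    assert(S12NSE1 != S1SE1);
    return 0;
}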