Diffstat (limited to 'target/arm/cpu.h')
-rw-r--r-- | target/arm/cpu.h | 421 |
1 file changed, 161 insertions, 260 deletions
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 608fcbd0b7..0b3036c484 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -144,11 +144,12 @@ typedef struct ARMGenericTimer {
     uint64_t ctl; /* Timer Control register */
 } ARMGenericTimer;
 
-#define GTIMER_PHYS 0
-#define GTIMER_VIRT 1
-#define GTIMER_HYP  2
-#define GTIMER_SEC  3
-#define NUM_GTIMERS 4
+#define GTIMER_PHYS     0
+#define GTIMER_VIRT     1
+#define GTIMER_HYP      2
+#define GTIMER_SEC      3
+#define GTIMER_HYPVIRT  4
+#define NUM_GTIMERS     5
 
 typedef struct {
     uint64_t raw_tcr;
@@ -1424,13 +1425,6 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
 #define HCR_ATA       (1ULL << 56)
 #define HCR_DCT       (1ULL << 57)
 
-/*
- * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
- * HCR_MASK and then clear it again if the feature bit is not set in
- * hcr_write().
- */
-#define HCR_MASK ((1ULL << 34) - 1)
-
 #define SCR_NS                (1U << 0)
 #define SCR_IRQ               (1U << 1)
 #define SCR_FIQ               (1U << 2)
@@ -2582,6 +2576,19 @@ struct ARMCPRegInfo {
      * fieldoffset is 0 then no reset will be done.
      */
     CPResetFn *resetfn;
+
+    /*
+     * "Original" writefn and readfn.
+     * For ARMv8.1-VHE register aliases, we overwrite the read/write
+     * accessor functions of various EL1/EL0 to perform the runtime
+     * check for which sysreg should actually be modified, and then
+     * forwards the operation.  Before overwriting the accessors,
+     * the original function is copied here, so that accesses that
+     * really do go to the EL1/EL0 version proceed normally.
+     * (The corresponding EL2 register is linked via opaque.)
+     */
+    CPReadFn *orig_readfn;
+    CPWriteFn *orig_writefn;
 };
 
 /* Macros which are lvalues for the field in CPUARMState for the
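The new orig_readfn/orig_writefn fields support the register redirection that VHE (HCR_EL2.E2H) requires: an EL1 encoding executed with redirection in effect must access the EL2 twin instead. A minimal sketch of a forwarding read accessor, with assumed names (redirect_for_e2h() and el2_e2h_read_sketch() are illustrative, not from this patch; raw_read() stands in for the default fieldoffset-based read):

    /* Forward an EL1 sysreg read to its EL2 twin when E2H redirection
     * applies; otherwise fall back to the accessor saved at registration. */
    static uint64_t el2_e2h_read_sketch(CPUARMState *env, const ARMCPRegInfo *ri)
    {
        CPReadFn *readfn;

        if (redirect_for_e2h(env)) {   /* assumed: at EL2 with HCR_EL2.E2H set */
            ri = ri->opaque;           /* the linked EL2 register, per the comment */
            readfn = ri->readfn;
        } else {
            readfn = ri->orig_readfn;  /* the original EL1/EL0 accessor */
        }
        return readfn ? readfn(env, ri) : raw_read(env, ri);
    }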
@@ -2702,117 +2709,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
 #define ARM_CPUID_TI915T      0x54029152
 #define ARM_CPUID_TI925T      0x54029252
 
-static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
-                                     unsigned int target_el)
-{
-    CPUARMState *env = cs->env_ptr;
-    unsigned int cur_el = arm_current_el(env);
-    bool secure = arm_is_secure(env);
-    bool pstate_unmasked;
-    int8_t unmasked = 0;
-    uint64_t hcr_el2;
-
-    /* Don't take exceptions if they target a lower EL.
-     * This check should catch any exceptions that would not be taken but left
-     * pending.
-     */
-    if (cur_el > target_el) {
-        return false;
-    }
-
-    hcr_el2 = arm_hcr_el2_eff(env);
-
-    switch (excp_idx) {
-    case EXCP_FIQ:
-        pstate_unmasked = !(env->daif & PSTATE_F);
-        break;
-
-    case EXCP_IRQ:
-        pstate_unmasked = !(env->daif & PSTATE_I);
-        break;
-
-    case EXCP_VFIQ:
-        if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
-            /* VFIQs are only taken when hypervized and non-secure.  */
-            return false;
-        }
-        return !(env->daif & PSTATE_F);
-    case EXCP_VIRQ:
-        if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
-            /* VIRQs are only taken when hypervized and non-secure.  */
-            return false;
-        }
-        return !(env->daif & PSTATE_I);
-    default:
-        g_assert_not_reached();
-    }
-
-    /* Use the target EL, current execution state and SCR/HCR settings to
-     * determine whether the corresponding CPSR bit is used to mask the
-     * interrupt.
-     */
-    if ((target_el > cur_el) && (target_el != 1)) {
-        /* Exceptions targeting a higher EL may not be maskable */
-        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
-            /* 64-bit masking rules are simple: exceptions to EL3
-             * can't be masked, and exceptions to EL2 can only be
-             * masked from Secure state. The HCR and SCR settings
-             * don't affect the masking logic, only the interrupt routing.
-             */
-            if (target_el == 3 || !secure) {
-                unmasked = 1;
-            }
-        } else {
-            /* The old 32-bit-only environment has a more complicated
-             * masking setup. HCR and SCR bits not only affect interrupt
-             * routing but also change the behaviour of masking.
-             */
-            bool hcr, scr;
-
-            switch (excp_idx) {
-            case EXCP_FIQ:
-                /* If FIQs are routed to EL3 or EL2 then there are cases where
-                 * we override the CPSR.F in determining if the exception is
-                 * masked or not. If neither of these are set then we fall back
-                 * to the CPSR.F setting otherwise we further assess the state
-                 * below.
-                 */
-                hcr = hcr_el2 & HCR_FMO;
-                scr = (env->cp15.scr_el3 & SCR_FIQ);
-
-                /* When EL3 is 32-bit, the SCR.FW bit controls whether the
-                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
-                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
-                 * when non-secure but only when FIQs are only routed to EL3.
-                 */
-                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
-                break;
-            case EXCP_IRQ:
-                /* When EL3 execution state is 32-bit, if HCR.IMO is set then
-                 * we may override the CPSR.I masking when in non-secure state.
-                 * The SCR.IRQ setting has already been taken into consideration
-                 * when setting the target EL, so it does not have a further
-                 * affect here.
-                 */
-                hcr = hcr_el2 & HCR_IMO;
-                scr = false;
-                break;
-            default:
-                g_assert_not_reached();
-            }
-
-            if ((scr || hcr) && !secure) {
-                unmasked = 1;
-            }
-        }
-    }
-
-    /* The PSTATE bits only mask the interrupt if we have not overriden the
-     * ability above.
-     */
-    return unmasked || pstate_unmasked;
-}
-
 #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
 #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
 #define CPU_RESOLVING_TYPE TYPE_ARM_CPU
@@ -2826,18 +2722,21 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  + NonSecure EL1 & 0 stage 1
  *  + NonSecure EL1 & 0 stage 2
  *  + NonSecure EL2
- *  + Secure EL1 & EL0
+ *  + NonSecure EL2 & 0   (ARMv8.1-VHE)
+ *  + Secure EL1 & 0
  *  + Secure EL3
  * If EL3 is 32-bit:
  *  + NonSecure PL1 & 0 stage 1
  *  + NonSecure PL1 & 0 stage 2
  *  + NonSecure PL2
- *  + Secure PL0 & PL1
+ *  + Secure PL0
+ *  + Secure PL1
  * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
- *    may differ in access permissions even if the VA->PA map is the same
+ * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ *    because they may differ in access permissions even if the VA->PA map is
+ *    the same
 * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *    translation, which means that we have one mmu_idx that deals with two
 *    concatenated translation regimes [this sort of combined s1+2 TLB is
@@ -2849,19 +2748,23 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
 * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *    translation regimes, because they map reasonably well to each other
 *    and they can't both be active at the same time.
- * This gives us the following list of mmu_idx values:
+ * 5. we want to be able to use the TLB for accesses done as part of a
+ *    stage1 page table walk, rather than having to walk the stage2 page
+ *    table over and over.
+ *
+ * This gives us the following list of cases:
 *
- * NS EL0 (aka NS PL0) stage 1+2
- * NS EL1 (aka NS PL1) stage 1+2
+ * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * NS EL0 EL2&0
+ * NS EL2 EL2&0
 * NS EL2 (aka NS PL2)
+ * S EL0 EL1&0 (aka S PL0)
+ * S EL1 EL1&0 (not used if EL3 is 32 bit)
 * S EL3 (aka S PL1)
- * S EL0 (aka S PL0)
- * S EL1 (not used if EL3 is 32 bit)
- * NS EL0+1 stage 2
+ * NS EL1&0 stage 2
 *
- * (The last of these is an mmu_idx because we want to be able to use the TLB
- * for the accesses done as part of a stage 1 page table walk, rather than
- * having to walk the stage 2 page table over and over.)
+ * for a total of 9 different mmu_idx.
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
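To make the nine A-profile cases above concrete, here is a hedged sketch of how the current core state could be mapped onto them (illustrative only; the real selection logic lives in target/arm/helper.c and also checks that EL2 is AArch64):

    /* Sketch: choose an mmu_idx from the current EL, security state and
     * the effective HCR_EL2.{E2H,TGE} bits, covering the nine cases above. */
    static ARMMMUIdx mmu_idx_sketch(CPUARMState *env)
    {
        uint64_t hcr = arm_hcr_el2_eff(env);

        switch (arm_current_el(env)) {
        case 0:
            if (arm_is_secure_below_el3(env)) {
                return ARMMMUIdx_SE10_0;
            }
            /* EL0 uses the EL2&0 regime only when E2H and TGE are both set. */
            return (hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
                   ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
        case 1:
            return arm_is_secure_below_el3(env)
                   ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        case 2:
            /* With E2H set, EL2 itself runs in the EL2&0 regime. */
            return (hcr & HCR_E2H) ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
        case 3:
            return ARMMMUIdx_SE3;
        default:
            g_assert_not_reached();
        }
    }

(ARMMMUIdx_Stage2 and the two Stage1 indexes are not reachable this way; they are used only for stage-2 walks and AT instructions, as the enum in the next hunk notes.)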
@@ -2899,106 +2802,88 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
 * For M profile we arrange them to have a bit for priv, a bit for negpri
 * and a bit for secure.
 */
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
+#define ARM_MMU_IDX_A     0x10  /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
+#define ARM_MMU_IDX_M     0x40  /* M profile */
 
-/* meanings of the bits for M profile mmu idx values */
-#define ARM_MMU_IDX_M_PRIV 0x1
+/* Meanings of the bits for M profile mmu idx values */
+#define ARM_MMU_IDX_M_PRIV   0x1
 #define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S 0x4
+#define ARM_MMU_IDX_M_S      0x4  /* Secure */
 
-#define ARM_MMU_IDX_TYPE_MASK (~0x7)
-#define ARM_MMU_IDX_COREIDX_MASK 0x7
+#define ARM_MMU_IDX_TYPE_MASK \
+    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
 
 typedef enum ARMMMUIdx {
-    ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
-    ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MPrivNegPri = 3 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSUser = 4 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSPriv = 5 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSUserNegPri = 6 | ARM_MMU_IDX_M,
-    ARMMMUIdx_MSPrivNegPri = 7 | ARM_MMU_IDX_M,
-    /* Indexes below here don't have TLBs and are used only for AT system
-     * instructions or for the first stage of an S12 page table walk.
+    /*
+     * A-profile.
      */
-    ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
-} ARMMMUIdx;
+    ARMMMUIdx_E10_0     =  0 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_0     =  1 | ARM_MMU_IDX_A,
 
-/* Bit macros for the core-mmu-index values for each index,
- * for use when calling tlb_flush_by_mmuidx() and friends.
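The split of the old S12NSE*/S1SE*/S2NS names into the E10/E20/SE/Stage naming makes the regime explicit in the identifier: E10 is EL1&0, E20 is EL2&0, SE* are the Secure variants, and Stage2/Stage1_E* serve only page-table walks and AT instructions. The TO_CORE_BIT macro that closes this hunk derives each TLB-flush bit from the enum value itself, keeping the two enums in sync by construction; its expansion is shown after the ARMMMUIdxBit definition below.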
- */
-typedef enum ARMMMUIdxBit {
-    ARMMMUIdxBit_S12NSE0 = 1 << 0,
-    ARMMMUIdxBit_S12NSE1 = 1 << 1,
-    ARMMMUIdxBit_S1E2 = 1 << 2,
-    ARMMMUIdxBit_S1E3 = 1 << 3,
-    ARMMMUIdxBit_S1SE0 = 1 << 4,
-    ARMMMUIdxBit_S1SE1 = 1 << 5,
-    ARMMMUIdxBit_S2NS = 1 << 6,
-    ARMMMUIdxBit_MUser = 1 << 0,
-    ARMMMUIdxBit_MPriv = 1 << 1,
-    ARMMMUIdxBit_MUserNegPri = 1 << 2,
-    ARMMMUIdxBit_MPrivNegPri = 1 << 3,
-    ARMMMUIdxBit_MSUser = 1 << 4,
-    ARMMMUIdxBit_MSPriv = 1 << 5,
-    ARMMMUIdxBit_MSUserNegPri = 1 << 6,
-    ARMMMUIdxBit_MSPrivNegPri = 1 << 7,
-} ARMMMUIdxBit;
+    ARMMMUIdx_E10_1     =  2 | ARM_MMU_IDX_A,
 
-#define MMU_USER_IDX 0
+    ARMMMUIdx_E2        =  3 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E20_2     =  4 | ARM_MMU_IDX_A,
 
-static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
-{
-    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
-}
+    ARMMMUIdx_SE10_0    =  5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE10_1    =  6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE3       =  7 | ARM_MMU_IDX_A,
 
-static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
-{
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        return mmu_idx | ARM_MMU_IDX_M;
-    } else {
-        return mmu_idx | ARM_MMU_IDX_A;
-    }
-}
+    ARMMMUIdx_Stage2    =  8 | ARM_MMU_IDX_A,
 
-/* Return the exception level we're running at if this is our mmu_idx */
-static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
-    case ARM_MMU_IDX_A:
-        return mmu_idx & 3;
-    case ARM_MMU_IDX_M:
-        return mmu_idx & ARM_MMU_IDX_M_PRIV;
-    default:
-        g_assert_not_reached();
-    }
-}
+    /*
+     * These are not allocated TLBs and are used only for AT system
+     * instructions or for the first stage of an S12 page table walk.
+     */
+    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+
+    /*
+     * M-profile.
+     */
+    ARMMMUIdx_MUser        = ARM_MMU_IDX_M,
+    ARMMMUIdx_MPriv        = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
+    ARMMMUIdx_MUserNegPri  = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
+    ARMMMUIdx_MPrivNegPri  = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
+    ARMMMUIdx_MSUser       = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
+    ARMMMUIdx_MSPriv       = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
+    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
+    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
+} ARMMMUIdx;
 
 /*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
+ * Bit macros for the core-mmu-index values for each index,
+ * for use when calling tlb_flush_by_mmuidx() and friends.
 */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri);
+#define TO_CORE_BIT(NAME) \
+    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
 
-/* Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
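The v7M mmu_idx helper declarations removed here, like the inline helpers above, move out of cpu.h (into target/arm internals) rather than disappear. Relatedly, arm_sctlr() also goes out of line in the next hunk, because with VHE the EL0 answer is regime-dependent: under the EL2&0 regime, EL0 is controlled by SCTLR_EL2, not SCTLR_EL1. A sketch of the shape the out-of-line definition can take (assuming a helper arm_mmu_idx_el() that reports the mmu_idx for execution at a given EL; the real definition lives in target/arm/helper.c):

    uint64_t arm_sctlr_sketch(CPUARMState *env, int el)
    {
        /* Only EL0 needs to be adjusted for the EL1&0 or EL2&0 regime. */
        if (el == 0) {
            ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);   /* assumed helper */
            el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
        }
        return env->cp15.sctlr_el[el];
    }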
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv);
+typedef enum ARMMMUIdxBit {
+    TO_CORE_BIT(E10_0),
+    TO_CORE_BIT(E20_0),
+    TO_CORE_BIT(E10_1),
+    TO_CORE_BIT(E2),
+    TO_CORE_BIT(E20_2),
+    TO_CORE_BIT(SE10_0),
+    TO_CORE_BIT(SE10_1),
+    TO_CORE_BIT(SE3),
+    TO_CORE_BIT(Stage2),
+
+    TO_CORE_BIT(MUser),
+    TO_CORE_BIT(MPriv),
+    TO_CORE_BIT(MUserNegPri),
+    TO_CORE_BIT(MPrivNegPri),
+    TO_CORE_BIT(MSUser),
+    TO_CORE_BIT(MSPriv),
+    TO_CORE_BIT(MSUserNegPri),
+    TO_CORE_BIT(MSPrivNegPri),
+} ARMMMUIdxBit;
 
-/* Return the MMU index for a v7M CPU in the specified security state */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
+#undef TO_CORE_BIT
+
+#define MMU_USER_IDX 0
 
 /**
  * cpu_mmu_index:
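Expanding TO_CORE_BIT by hand shows what each enumerator becomes: ARMMMUIdx_E20_2 is 4 | ARM_MMU_IDX_A = 0x14, its low bits under ARM_MMU_IDX_COREIDX_MASK are 4, so ARMMMUIdxBit_E20_2 is 1 << 4. These bitmaps plug straight into the generic TLB-flush API; a small usage sketch (the wrapper function is illustrative, tlb_flush_by_mmuidx() is the existing API from exec/exec-all.h):

    /* Flush both TLBs of the NS EL2&0 regime, e.g. when a control
     * register change invalidates its translations. */
    static void flush_e20_regime_sketch(CPUState *cs)
    {
        tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E20_0 | ARMMMUIdxBit_E20_2);
    }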
@@ -3159,15 +3044,7 @@ static inline bool arm_sctlr_b(CPUARMState *env)
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
 }
 
-static inline uint64_t arm_sctlr(CPUARMState *env, int el)
-{
-    if (el == 0) {
-        /* FIXME: ARMv8.1-VHE S2 translation regime.  */
-        return env->cp15.sctlr_el[1];
-    } else {
-        return env->cp15.sctlr_el[el];
-    }
-}
+uint64_t arm_sctlr(CPUARMState *env, int el);
 
 static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                   bool sctlr_b)
@@ -3221,55 +3098,73 @@ typedef ARMCPU ArchCPU;
 * We put flags which are shared between 32 and 64 bit mode at the top
 * of the word, and flags which apply to only one mode at the bottom.
 *
+ *  31          20    18    14          9              0
+ * +--------------+-----+-----+----------+--------------+
+ * |              |     |   TBFLAG_A32   |              |
+ * |              |     +-----+----------+  TBFLAG_AM32 |
+ * |  TBFLAG_ANY  |           |TBFLAG_M32|              |
+ * |              |         +-+----------+--------------|
+ * |              |         |         TBFLAG_A64        |
+ * +--------------+---------+---------------------------+
+ *  31          20        15                           0
+ *
 * Unless otherwise noted, these bits are cached in env->hflags.
 */
 FIELD(TBFLAG_ANY, AARCH64_STATE, 31, 1)
-FIELD(TBFLAG_ANY, MMUIDX, 28, 3)
-FIELD(TBFLAG_ANY, SS_ACTIVE, 27, 1)
-FIELD(TBFLAG_ANY, PSTATE_SS, 26, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, SS_ACTIVE, 30, 1)
+FIELD(TBFLAG_ANY, PSTATE_SS, 29, 1)     /* Not cached. */
+FIELD(TBFLAG_ANY, BE_DATA, 28, 1)
+FIELD(TBFLAG_ANY, MMUIDX, 24, 4)
 /* Target EL if we take a floating-point-disabled exception */
-FIELD(TBFLAG_ANY, FPEXC_EL, 24, 2)
-FIELD(TBFLAG_ANY, BE_DATA, 23, 1)
+FIELD(TBFLAG_ANY, FPEXC_EL, 22, 2)
+/* For A-profile only, target EL for debug exceptions.  */
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 20, 2)
+
 /*
- * For A-profile only, target EL for debug exceptions.
- * Note that this overlaps with the M-profile-only HANDLER and STACKCHECK bits.
+ * Bit usage when in AArch32 state, both A- and M-profile.
 */
-FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 21, 2)
+FIELD(TBFLAG_AM32, CONDEXEC, 0, 8)      /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 8, 1)         /* Not cached. */
 
-/* Bit usage when in AArch32 state: */
-FIELD(TBFLAG_A32, THUMB, 0, 1)          /* Not cached. */
-FIELD(TBFLAG_A32, VECLEN, 1, 3)         /* Not cached. */
-FIELD(TBFLAG_A32, VECSTRIDE, 4, 2)      /* Not cached. */
+/*
+ * Bit usage when in AArch32 state, for A-profile only.
+ */
+FIELD(TBFLAG_A32, VECLEN, 9, 3)         /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 12, 2)     /* Not cached. */
 /*
 * We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime. This shares the same bits as
 * VECSTRIDE, which is OK as no XScale CPU has VFP.
 * Not cached, because VECLEN+VECSTRIDE are not cached.
 */
-FIELD(TBFLAG_A32, XSCALE_CPAR, 4, 2)
+FIELD(TBFLAG_A32, XSCALE_CPAR, 12, 2)
+FIELD(TBFLAG_A32, VFPEN, 14, 1)         /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR_B, 15, 1)
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 16, 1)
 /*
 * Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
-FIELD(TBFLAG_A32, NS, 6, 1)
-FIELD(TBFLAG_A32, VFPEN, 7, 1)          /* Partially cached, minus FPEXC. */
-FIELD(TBFLAG_A32, CONDEXEC, 8, 8)       /* Not cached. */
-FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
-FIELD(TBFLAG_A32, HSTR_ACTIVE, 17, 1)
-
-/* For M profile only, set if FPCCR.LSPACT is set */
-FIELD(TBFLAG_A32, LSPACT, 18, 1)        /* Not cached. */
-/* For M profile only, set if we must create a new FP context */
-FIELD(TBFLAG_A32, NEW_FP_CTXT_NEEDED, 19, 1)  /* Not cached. */
-/* For M profile only, set if FPCCR.S does not match current security state */
-FIELD(TBFLAG_A32, FPCCR_S_WRONG, 20, 1)  /* Not cached. */
-/* For M profile only, Handler (ie not Thread) mode */
-FIELD(TBFLAG_A32, HANDLER, 21, 1)
-/* For M profile only, whether we should generate stack-limit checks */
-FIELD(TBFLAG_A32, STACKCHECK, 22, 1)
-
-/* Bit usage when in AArch64 state */
+FIELD(TBFLAG_A32, NS, 17, 1)
+
+/*
+ * Bit usage when in AArch32 state, for M-profile only.
+ */
+/* Handler (ie not Thread) mode */
+FIELD(TBFLAG_M32, HANDLER, 9, 1)
+/* Whether we should generate stack-limit checks */
+FIELD(TBFLAG_M32, STACKCHECK, 10, 1)
+/* Set if FPCCR.LSPACT is set */
+FIELD(TBFLAG_M32, LSPACT, 11, 1)                 /* Not cached. */
+/* Set if we must create a new FP context */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 12, 1)     /* Not cached. */
+/* Set if FPCCR.S does not match current security state */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 13, 1)          /* Not cached. */
+
+/*
+ * Bit usage when in AArch64 state
+ */
 FIELD(TBFLAG_A64, TBII, 0, 2)
 FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
 FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
@@ -3277,6 +3172,7 @@ FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
 FIELD(TBFLAG_A64, BT, 9, 1)
 FIELD(TBFLAG_A64, BTYPE, 10, 2)         /* Not cached. */
 FIELD(TBFLAG_A64, TBID, 12, 2)
+FIELD(TBFLAG_A64, UNPRIV, 14, 1)
 
 static inline bool bswap_code(bool sctlr_b)
 {
@@ -3685,6 +3581,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
 }
 
+static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
+}
+
 static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
 }
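The new isar_feature_aa64_vh() test follows the file's existing pattern of probing the ID registers (here ID_AA64MMFR1.VH) rather than ARM_FEATURE_* bits. Registration of VHE-only state can then be gated on it, along these lines (cpu_isar_feature() and define_arm_cp_regs() are existing interfaces; vhe_reginfo_sketch is a placeholder name, not from this patch):

    /* Register the VHE-only system registers only when the ID registers
     * advertise the feature. */
    if (cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo_sketch);
    }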