author    Peter Maydell <peter.maydell@linaro.org>  2022-07-18 16:29:32 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2022-07-18 16:29:32 +0100
commit    782378973121addeb11b13fd12a6ac2e69faa33f (patch)
tree      dbb9f368e6e40347dd41d0a981ac533409e7dae7 /target
parent    20f55423960ef9f351b49d171c29d257021d75b3 (diff)
parent    004c8a8bc569c8b18fca6fc90ffe3223daaf17b7 (diff)
Merge tag 'pull-target-arm-20220718' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
 * hw/intc/armv7m_nvic: ICPRn must not unpend an IRQ that is being held high
 * target/arm: Fill in VL for tbflags when SME enabled and SVE disabled
 * target/arm: Fix aarch64_sve_change_el for SME
 * linux-user/aarch64: Do not clear PROT_MTE on mprotect
 * target/arm: Honour VTCR_EL2 bits in Secure EL2
 * hw/adc: Fix CONV bit in NPCM7XX ADC CON register
 * hw/adc: Make adci[*] R/W in NPCM7XX ADC
 * target/arm: Don't set syndrome ISS for loads and stores with writeback
 * Align Raspberry Pi DMA interrupts with Linux DTS

# gpg: Signature made Mon 18 Jul 2022 14:58:26 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20220718' of https://git.linaro.org/people/pmaydell/qemu-arm:
  Align Raspberry Pi DMA interrupts with Linux DTS
  target/arm: Don't set syndrome ISS for loads and stores with writeback
  hw/adc: Make adci[*] R/W in NPCM7XX ADC
  hw/adc: Fix CONV bit in NPCM7XX ADC CON register
  target/arm: Honour VTCR_EL2 bits in Secure EL2
  target/arm: Store TCR_EL* registers as uint64_t
  target/arm: Store VTCR_EL2, VSTCR_EL2 registers as uint64_t
  target/arm: Fix big-endian host handling of VTCR
  target/arm: Fold regime_tcr() and regime_tcr_value() together
  target/arm: Calculate mask/base_mask in get_level1_table_address()
  target/arm: Define and use new regime_tcr_value() function
  linux-user/aarch64: Do not clear PROT_MTE on mprotect
  target/arm: Fix aarch64_sve_change_el for SME
  target/arm: Fill in VL for tbflags when SME enabled and SVE disabled
  hw/intc/armv7m_nvic: ICPRn must not unpend an IRQ that is being held high

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu.c            |   2
-rw-r--r--  target/arm/cpu.h            |  38
-rw-r--r--  target/arm/debug_helper.c   |   2
-rw-r--r--  target/arm/helper.c         | 128
-rw-r--r--  target/arm/internals.h      |  34
-rw-r--r--  target/arm/ptw.c            |  38
-rw-r--r--  target/arm/tlb_helper.c     |   2
-rw-r--r--  target/arm/translate-a64.c  |   4
8 files changed, 137 insertions, 111 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5de7e097e9..1b7b3d76bb 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -226,7 +226,7 @@ static void arm_cpu_reset(DeviceState *dev)
* Enable TBI0 but not TBI1.
* Note that this must match useronly_clean_ptr.
*/
- env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);
+ env->cp15.tcr_el[1] = 5 | (1ULL << 37);
/* Enable MTE */
if (cpu_isar_feature(aa64_mte, cpu)) {
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 1e36a839ee..e890ee074d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -166,12 +166,6 @@ typedef struct ARMGenericTimer {
#define GTIMER_HYPVIRT 4
#define NUM_GTIMERS 5
-typedef struct {
- uint64_t raw_tcr;
- uint32_t mask;
- uint32_t base_mask;
-} TCR;
-
#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
@@ -339,9 +333,9 @@ typedef struct CPUArchState {
uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
/* MMU translation table base control. */
- TCR tcr_el[4];
- TCR vtcr_el2; /* Virtualization Translation Control. */
- TCR vstcr_el2; /* Secure Virtualization Translation Control. */
+ uint64_t tcr_el[4];
+ uint64_t vtcr_el2; /* Virtualization Translation Control. */
+ uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
uint32_t c2_data; /* MPU data cacheable bits. */
uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
@@ -1418,6 +1412,25 @@ FIELD(CPTR_EL3, TCPAC, 31, 1)
#define TTBCR_SH1 (1U << 28)
#define TTBCR_EAE (1U << 31)
+FIELD(VTCR, T0SZ, 0, 6)
+FIELD(VTCR, SL0, 6, 2)
+FIELD(VTCR, IRGN0, 8, 2)
+FIELD(VTCR, ORGN0, 10, 2)
+FIELD(VTCR, SH0, 12, 2)
+FIELD(VTCR, TG0, 14, 2)
+FIELD(VTCR, PS, 16, 3)
+FIELD(VTCR, VS, 19, 1)
+FIELD(VTCR, HA, 21, 1)
+FIELD(VTCR, HD, 22, 1)
+FIELD(VTCR, HWU59, 25, 1)
+FIELD(VTCR, HWU60, 26, 1)
+FIELD(VTCR, HWU61, 27, 1)
+FIELD(VTCR, HWU62, 28, 1)
+FIELD(VTCR, NSW, 29, 1)
+FIELD(VTCR, NSA, 30, 1)
+FIELD(VTCR, DS, 32, 1)
+FIELD(VTCR, SL2, 33, 1)
+
/* Bit definitions for ARMv8 SPSR (PSTATE) format.
* Only these are valid when in AArch64 mode; in
* AArch32 mode SPSRs are basically CPSR-format.
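
The FIELD() definitions added above come from QEMU's hw/registerfields.h, which turns each (register, field, shift, length) tuple into named constants usable with FIELD_EX64(). Here is a simplified, standalone re-creation of that pattern for illustration (the real header additionally defines R_<REG>_<FIELD>_MASK constants, which is what VTCR_SHARED_FIELD_MASK in internals.h is built from):

#include <stdint.h>
#include <stdio.h>

/* Simplified re-creation of the registerfields.h pattern, illustration only. */
#define FIELD(reg, field, shift, length) \
    enum { R_##reg##_##field##_SHIFT = (shift), \
           R_##reg##_##field##_LENGTH = (length) };

#define FIELD_EX64(storage, reg, field) \
    (((storage) >> R_##reg##_##field##_SHIFT) & \
     ((1ULL << R_##reg##_##field##_LENGTH) - 1))

FIELD(VTCR, SL0, 6, 2)

int main(void)
{
    uint64_t vtcr = 1ULL << 6; /* SL0 = 1, everything else clear */
    printf("VTCR.SL0 = %llu\n",
           (unsigned long long)FIELD_EX64(vtcr, VTCR, SL0));
    return 0;
}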
@@ -3392,9 +3405,12 @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
/*
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
+ * Note that with the Linux kernel, PROT_MTE may not be cleared by
+ * mprotect, but PROT_BTI may be cleared. Cf. the kernel's VM_ARCH_CLEAR.
*/
-#define PAGE_BTI PAGE_TARGET_1
-#define PAGE_MTE PAGE_TARGET_2
+#define PAGE_BTI PAGE_TARGET_1
+#define PAGE_MTE PAGE_TARGET_2
+#define PAGE_TARGET_STICKY PAGE_MTE
#ifdef TARGET_TAGGED_ADDRESSES
/**
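
The new PAGE_TARGET_STICKY alias names the kernel behaviour described in the comment: across mprotect, MTE is preserved while BTI is recomputed. A hedged sketch of how a caller can apply it (the helper name here is illustrative, not an actual QEMU function):

/* Illustrative helper, not QEMU's actual mprotect path: flags covered
 * by PAGE_TARGET_STICKY (i.e. PAGE_MTE) are carried over from the
 * existing mapping; everything else comes from the new protection. */
static int merge_target_flags(int old_flags, int new_flags)
{
    return new_flags | (old_flags & PAGE_TARGET_STICKY);
}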
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index d09fccb0a4..c21739242c 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -439,7 +439,7 @@ static uint32_t arm_debug_exception_fsr(CPUARMState *env)
using_lpae = true;
} else {
if (arm_feature(env, ARM_FEATURE_LPAE) &&
- (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
+ (env->cp15.tcr_el[target_el] & TTBCR_EAE)) {
using_lpae = true;
}
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index cfcad97ce0..1a8b06410e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3606,19 +3606,21 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
};
-static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
- TCR *tcr = raw_ptr(env, ri);
- int maskshift = extract32(value, 0, 3);
+ ARMCPU *cpu = env_archcpu(env);
if (!arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
- /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
- * using Long-desciptor translation table format */
+ /*
+ * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
+ * using Long-descriptor translation table format
+ */
value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
} else if (arm_feature(env, ARM_FEATURE_EL3)) {
- /* In an implementation that includes the Security Extensions
+ /*
+ * In an implementation that includes the Security Extensions
* TTBCR has additional fields PD0 [4] and PD1 [5] for
* Short-descriptor translation table format.
*/
@@ -3628,55 +3630,23 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
- /* Update the masks corresponding to the TCR bank being written
- * Note that we always calculate mask and base_mask, but
- * they are only used for short-descriptor tables (ie if EAE is 0);
- * for long-descriptor tables the TCR fields are used differently
- * and the mask and base_mask values are meaningless.
- */
- tcr->raw_tcr = value;
- tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
- tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
-}
-
-static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- ARMCPU *cpu = env_archcpu(env);
- TCR *tcr = raw_ptr(env, ri);
-
if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush.
*/
tlb_flush(CPU(cpu));
}
- /* Preserve the high half of TCR_EL1, set via TTBCR2. */
- value = deposit64(tcr->raw_tcr, 0, 32, value);
- vmsa_ttbcr_raw_write(env, ri, value);
-}
-
-static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- TCR *tcr = raw_ptr(env, ri);
-
- /* Reset both the TCR as well as the masks corresponding to the bank of
- * the TCR being reset.
- */
- tcr->raw_tcr = 0;
- tcr->mask = 0;
- tcr->base_mask = 0xffffc000u;
+ raw_write(env, ri, value);
}
static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
ARMCPU *cpu = env_archcpu(env);
- TCR *tcr = raw_ptr(env, ri);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
tlb_flush(CPU(cpu));
- tcr->raw_tcr = value;
+ raw_write(env, ri, value);
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
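
With the mask/base_mask caching gone, both write paths can simply fall through to raw_write(). For context, raw_write() elsewhere in helper.c is approximately the following (paraphrased, not part of this patch):

/* Approximate shape of helper.c's raw_write(): a plain store into the
 * CPUARMState slot named by ri->fieldoffset, 32- or 64-bit as declared. */
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}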
@@ -3780,15 +3750,15 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.writefn = vmsa_tcr_el12_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .raw_writefn = raw_write,
+ .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
{ .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
- .raw_writefn = vmsa_ttbcr_raw_write,
- /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
- .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
- offsetof(CPUARMState, cp15.tcr_el[1])} },
+ .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
+ offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
@@ -3799,8 +3769,8 @@ static const ARMCPRegInfo ttbcr2_reginfo = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.type = ARM_CP_ALIAS,
.bank_fieldoffsets = {
- offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
- offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
+ offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
+ offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
},
};
@@ -4216,7 +4186,7 @@ static int vae1_tlbmask(CPUARMState *env)
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
uint64_t addr)
{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
int select = extract64(addr, 55, 1);
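
The select bit works because AArch64 splits the VA space in half: bit 55 distinguishes the TTBR0 region (0) from the TTBR1 region (1), and hence which TBIn field applies. A trivial standalone illustration:

#include <stdint.h>

/* Illustration only: VA bit 55 picks the translation half, and so
 * which TTBRn/TBIn governs the address. */
static int ttbr_select(uint64_t va)
{
    return (va >> 55) & 1;
}
/* ttbr_select(0x0000aaaabbbbccccULL) == 0 -> TTBR0, TBI0 */
/* ttbr_select(0xffffaaaabbbbccccULL) == 1 -> TTBR1, TBI1 */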
@@ -5403,19 +5373,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL2_RW, .writefn = vmsa_tcr_el12_write,
- /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
{ .name = "VTCR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
.type = ARM_CP_ALIAS,
.access = PL2_RW, .accessfn = access_el3_aa32ns,
- .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
{ .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
.access = PL2_RW,
- /* no .writefn needed as this can't cause an ASID change;
- * no .raw_writefn or .resetfn needed as we never use mask/base_mask
- */
+ /* no .writefn needed as this can't cause an ASID change */
.fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
{ .name = "VTTBR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 6, .crm = 2,
@@ -5645,12 +5612,8 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL3_RW,
- /* no .writefn needed as this can't cause an ASID change;
- * we must provide a .raw_writefn and .resetfn because we handle
- * reset and migration for the AArch32 TTBCR(S), which might be
- * using mask and base_mask.
- */
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
+ /* no .writefn needed as this can't cause an ASID change */
+ .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
@@ -10158,7 +10121,7 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ARMMMUIdx mmu_idx, bool data)
{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
bool epd, hpd, using16k, using64k, tsz_oob, ds;
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
ARMCPU *cpu = env_archcpu(env);
@@ -10849,7 +10812,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
{
CPUARMTBFlags flags = {};
ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
uint64_t sctlr;
int tbii, tbid;
@@ -10882,13 +10845,19 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
int sme_el = sme_exception_el(env, el);
+ bool sm = FIELD_EX64(env->svcr, SVCR, SM);
DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
if (sme_el == 0) {
/* Similarly, do not compute SVL if SME is disabled. */
- DP_TBFLAG_A64(flags, SVL, sve_vqm1_for_el_sm(env, el, true));
+ int svl = sve_vqm1_for_el_sm(env, el, true);
+ DP_TBFLAG_A64(flags, SVL, svl);
+ if (sm) {
+ /* If SVE is disabled, we will not have set VL above. */
+ DP_TBFLAG_A64(flags, VL, svl);
+ }
}
- if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ if (sm) {
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
}
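
This hunk implements the queue item "Fill in VL for tbflags when SME enabled and SVE disabled": while PSTATE.SM is set, the translator must see the streaming vector length, because the SVE path never wrote the VL flag when SVE is disabled. A simplified sketch of the resulting selection (not QEMU's actual flag plumbing):

#include <stdbool.h>

/* Simplified sketch: the vector length the translator consumes is
 * SVL while in streaming mode, otherwise VL. */
static int effective_vq_m1(int vl_vqm1, int svl_vqm1, bool pstate_sm)
{
    return pstate_sm ? svl_vqm1 : vl_vqm1;
}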
@@ -11222,6 +11191,21 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
}
}
+static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
+{
+ int exc_el;
+
+ if (sm) {
+ exc_el = sme_exception_el(env, el);
+ } else {
+ exc_el = sve_exception_el(env, el);
+ }
+ if (exc_el) {
+ return 0; /* disabled */
+ }
+ return sve_vqm1_for_el_sm(env, el, sm);
+}
+
/*
* Notice a change in SVE vector size when changing EL.
*/
@@ -11230,7 +11214,7 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
{
ARMCPU *cpu = env_archcpu(env);
int old_len, new_len;
- bool old_a64, new_a64;
+ bool old_a64, new_a64, sm;
/* Nothing to do if no SVE. */
if (!cpu_isar_feature(aa64_sve, cpu)) {
@@ -11250,7 +11234,8 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
* invoke ResetSVEState when taking an exception from, or
* returning to, AArch32 state when PSTATE.SM is enabled.
*/
- if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
+ sm = FIELD_EX64(env->svcr, SVCR, SM);
+ if (old_a64 != new_a64 && sm) {
arm_reset_sve_state(env);
return;
}
@@ -11267,10 +11252,13 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
* we already have the correct register contents when encountering the
* vq0->vq0 transition between EL0->EL1.
*/
- old_len = (old_a64 && !sve_exception_el(env, old_el)
- ? sve_vqm1_for_el(env, old_el) : 0);
- new_len = (new_a64 && !sve_exception_el(env, new_el)
- ? sve_vqm1_for_el(env, new_el) : 0);
+ old_len = new_len = 0;
+ if (old_a64) {
+ old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
+ }
+ if (new_a64) {
+ new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
+ }
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 00e2e710f6..b8fefdff67 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -252,9 +252,9 @@ unsigned int arm_pamax(ARMCPU *cpu);
*/
static inline bool extended_addresses_enabled(CPUARMState *env)
{
- TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+ uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
return arm_el_is_aa64(env, 1) ||
- (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
+ (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
@@ -777,20 +777,36 @@ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
-/* Return the TCR controlling this translation regime */
-static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+/*
+ * These are the fields in VTCR_EL2 which affect both the Secure stage 2
+ * and the Non-Secure stage 2 translation regimes (and hence which are
+ * not present in VSTCR_EL2).
+ */
+#define VTCR_SHARED_FIELD_MASK \
+ (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
+ R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
+ R_VTCR_DS_MASK)
+
+/* Return the value of the TCR controlling this translation regime */
+static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
if (mmu_idx == ARMMMUIdx_Stage2) {
- return &env->cp15.vtcr_el2;
+ return env->cp15.vtcr_el2;
}
if (mmu_idx == ARMMMUIdx_Stage2_S) {
/*
- * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
- * those are not currently used by QEMU, so just return VSTCR_EL2.
+ * Secure stage 2 shares fields from VTCR_EL2. We merge those
+ * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
+ * value so the callers don't need to special case this.
+ *
+ * If a future architecture change defines bits in VSTCR_EL2 that
+ * overlap with these VTCR_EL2 fields we may need to revisit this.
*/
- return &env->cp15.vstcr_el2;
+ uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
+ v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
+ return v;
}
- return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+ return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/**
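
A worked example of the merge above, with register values chosen purely for illustration: with VTCR_EL2.HA set and VSTCR_EL2.SW set, the synthesized Secure stage 2 value carries both, because SW (bit 29) lies outside VTCR_SHARED_FIELD_MASK while HA lies inside it:

/* Worked example with assumed register values, not from the patch: */
uint64_t vtcr  = R_VTCR_HA_MASK;   /* shared field: taken from VTCR_EL2 */
uint64_t vstcr = VSTCR_SW;         /* secure-only field: kept from VSTCR_EL2 */
uint64_t tcr   = (vstcr & ~VTCR_SHARED_FIELD_MASK)
               | (vtcr  &  VTCR_SHARED_FIELD_MASK);
/* tcr now has both R_VTCR_HA_MASK and VSTCR_SW set. */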
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index e71fc1f429..3261039d93 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -241,9 +241,9 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
if (arm_is_secure_below_el3(env)) {
/* Check if page table walk is to secure or non-secure PA space. */
if (*is_secure) {
- *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+ *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
} else {
- *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+ *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
}
} else {
assert(!*is_secure);
@@ -315,20 +315,24 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t *table, uint32_t address)
{
/* Note that we can only get here for an AArch32 PL0/PL1 lookup */
- TCR *tcr = regime_tcr(env, mmu_idx);
+ uint64_t tcr = regime_tcr(env, mmu_idx);
+ int maskshift = extract32(tcr, 0, 3);
+ uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
+ uint32_t base_mask;
- if (address & tcr->mask) {
- if (tcr->raw_tcr & TTBCR_PD1) {
+ if (address & mask) {
+ if (tcr & TTBCR_PD1) {
/* Translation table walk disabled for TTBR1 */
return false;
}
*table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
} else {
- if (tcr->raw_tcr & TTBCR_PD0) {
+ if (tcr & TTBCR_PD0) {
/* Translation table walk disabled for TTBR0 */
return false;
}
- *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
+ base_mask = ~((uint32_t)0x3fffu >> maskshift);
+ *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
}
*table |= (address >> 18) & 0x3ffc;
return true;
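
To make the recomputed masks concrete, here is a standalone check of the arithmetic for an assumed TTBCR.N of 2 (short-descriptor format): VAs at or above 0xc0000000 are translated via TTBR1, and the TTBR0 table base is 2^(14-N) = 4KB aligned:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t n = 2;                                  /* assumed TTBCR.N */
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> n); /* 0xc0000000 */
    uint32_t base_mask = ~((uint32_t)0x3fffu >> n);  /* 0xfffff000 */
    printf("mask=0x%08x base_mask=0x%08x\n", mask, base_mask);
    return 0;
}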
@@ -820,7 +824,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
ARMMMUIdx mmu_idx)
{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t tcr = regime_tcr(env, mmu_idx);
uint32_t el = regime_el(env, mmu_idx);
int select, tsz;
bool epd, hpd;
@@ -994,7 +998,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
uint32_t attrs;
int32_t stride;
int addrsize, inputsize, outputsize;
- TCR *tcr = regime_tcr(env, mmu_idx);
+ uint64_t tcr = regime_tcr(env, mmu_idx);
int ap, ns, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
uint64_t descaddrmask;
@@ -1112,8 +1116,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
* For stage 2 translations the starting level is specified by the
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
*/
- uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
- uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
+ uint32_t sl0 = extract32(tcr, 6, 2);
+ uint32_t sl2 = extract64(tcr, 33, 1);
uint32_t startlevel;
bool ok;
@@ -2337,9 +2341,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ipa_secure = attrs->secure;
if (arm_is_secure_below_el3(env)) {
if (ipa_secure) {
- attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+ attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
} else {
- attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+ attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
}
} else {
assert(!ipa_secure);
@@ -2381,11 +2385,11 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
if (arm_is_secure_below_el3(env)) {
if (ipa_secure) {
attrs->secure =
- !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
+ !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
} else {
attrs->secure =
- !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
- || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
+ !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
+ || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
}
}
return 0;
@@ -2462,7 +2466,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
int r_el = regime_el(env, mmu_idx);
if (arm_el_is_aa64(env, r_el)) {
int pamax = arm_pamax(env_archcpu(env));
- uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+ uint64_t tcr = env->cp15.tcr_el[r_el];
int addrtop, tbi;
tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 7d8a86b3c4..5a709eab56 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -20,7 +20,7 @@ bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
return true;
}
if (arm_feature(env, ARM_FEATURE_LPAE)
- && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
return true;
}
return false;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b7b64f7358..163df8c615 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -3138,7 +3138,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
bool is_store = false;
bool is_extended = false;
bool is_unpriv = (idx == 2);
- bool iss_valid = !is_vector;
+ bool iss_valid;
bool post_index;
bool writeback;
int memidx;
@@ -3191,6 +3191,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
g_assert_not_reached();
}
+ iss_valid = !is_vector && !writeback;
+
if (rn == 31) {
gen_check_sp_alignment(s);
}
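
The deferred assignment encodes the rule this patch implements: the instruction syndrome must not be reported as valid for loads and stores with writeback, so iss_valid can only be decided once the addressing mode (and hence writeback) is known. Schematically:

#include <stdbool.h>

/* Schematic restatement of the condition computed above. */
static bool ldst_iss_valid(bool is_vector, bool writeback)
{
    return !is_vector && !writeback;
}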