author    Peter Maydell <peter.maydell@linaro.org>  2024-02-15 17:36:30 +0000
committer Peter Maydell <peter.maydell@linaro.org>  2024-02-15 17:36:30 +0000
commit    3ff11e4dcabe2b5b4c26e49d741018ec326f127f (patch)
tree      f696cd0a71026beb9149b352bba56c01af288c3d /target
parent    cc29c12ec629ba68a4a6cb7d165c94cc8502815a (diff)
parent    f780e63fe731b058fe52d43653600d8729a1b5f2 (diff)
Merge tag 'pull-target-arm-20240215' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
 * hw/arm/xilinx_zynq: Wire FIQ between CPU <> GIC
 * linux-user/aarch64: Choose SYNC as the preferred MTE mode
 * Fix some errors in SVE/SME handling of MTE tags
 * hw/pci-host/raven.c: Mark raven_io_ops as implementing unaligned accesses
 * hw/block/tc58128: Don't emit deprecation warning under qtest
 * tests/qtest: Fix handling of npcm7xx and GMAC tests
 * hw/arm/virt: Wire up non-secure EL2 virtual timer IRQ
 * tests/qtest/npcm7xx_emc-test: Connect all NICs to a backend
 * Don't assert on vmload/vmsave of M-profile CPUs
 * hw/arm/smmuv3: add support for stage 1 access fault
 * hw/arm/stellaris: QOM cleanups
 * Use new CBAR encoding for all v8 CPUs, not all aarch64 CPUs
 * Improve Cortex-R52 IMPDEF sysreg modelling
 * Allow access to SPSR_hyp from hyp mode
 * New board model mps3-an536 (Cortex-R52)

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmXOStQZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3vlDD/9HCq1B6e4hWQBlHrWG5nCK
# zBYRZ3GuuYCFB9FJt4EyXjUZ8ohDhLrpXvjhzSF6IqoPo0c0rQnfipeuj8Lu/hAV
# JrtHE4jQnE5Q9dwSz3A6oh0z5iIkFB/C1hW6fBDwvgAJUZ5xh5MBcxOvKq1s3WKv
# 3JngHC/KJrjgIKbcCV3Nd+OdyIZ7QZNXPwcBX9Zzt2eDkdEzOjcJYF4lisWdGav5
# JVXUeCXtClmFUZrxiGCLeTxb5X+TptxC+kAcPC7F5GjtVSy2800Z9sit2FTqd9Vd
# Y+rdA5IIBbZWPQ3OOHbaR69X4tWmc+BIT3nbQlESfV3odg0toQhe7aqn9UPIEU0K
# JRrzfodD7r7HK36lZm7ehmevLQnZgO6+MYL4Wrr0pUCNUxqVYlIyaqfsFSaknRg1
# 85L6agJlPYxtvrQtfhIV5m1V3IfyIiC7ECqMFe+QLdbR0ZxS3sI7sJ3O58xmcbDm
# SGVLl+xjAW3ZdgOb+k4B/BlPqasiJpuLe7So2e+cvDWN7OM0iJBxFAVz3yhJKGTP
# t9adJ1j0SI9XNrRuQkCX1T07Ciiuvr/mM4eY6YK+6TOq1zXks9st51ydbvEKdNW9
# YMFpWwUbYKKmUTEW06Xg5iNatse5kp4MUASF5BERkaGuyhRqLl/8p6jt6Q+9/D1S
# 5y8MFjUcdg8t4KcSJgdopw==
# =X+zR
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 15 Feb 2024 17:33:08 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20240215' of https://git.linaro.org/people/pmaydell/qemu-arm: (35 commits)
  docs: Add documentation for the mps3-an536 board
  hw/arm/mps3r: Add remaining devices
  hw/arm/mps3r: Add GPIO, watchdog, dual-timer, I2C devices
  hw/arm/mps3r: Add UARTs
  hw/arm/mps3r: Add CPUs, GIC, and per-CPU RAM
  hw/arm/mps3r: Initial skeleton for mps3-an536 board
  hw/misc/mps2-scc: Make changes needed for AN536 FPGA image
  hw/misc/mps2-scc: Factor out which-board conditionals
  hw/misc/mps2-scc: Fix condition for CFG3 register
  target/arm: Allow access to SPSR_hyp from hyp mode
  target/arm: Add Cortex-R52 IMPDEF sysregs
  target/arm: The Cortex-R52 has a read-only CBAR
  target/arm: Use new CBAR encoding for all v8 CPUs, not all aarch64 CPUs
  hw/arm/stellaris: Add missing QOM 'SoC' parent
  hw/arm/stellaris: Add missing QOM 'machine' parent
  hw/arm/stellaris: Convert I2C controller to Resettable interface
  hw/arm/stellaris: Convert ADC controller to Resettable interface
  hw/arm/smmuv3: add support for stage 1 access fault
  tests/qtest: Fix GMAC test to run on a machine in upstream QEMU
  target/arm: Don't get MDCR_EL2 in pmu_counter_enabled() before checking ARM_FEATURE_PMU
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/arm/helper.c              14
-rw-r--r--  target/arm/internals.h            2
-rw-r--r--  target/arm/tcg/cpu32.c          109
-rw-r--r--  target/arm/tcg/op_helper.c       43
-rw-r--r--  target/arm/tcg/sme_helper.c       8
-rw-r--r--  target/arm/tcg/sve_helper.c      12
-rw-r--r--  target/arm/tcg/translate-a64.h    2
-rw-r--r--  target/arm/tcg/translate-sme.c   15
-rw-r--r--  target/arm/tcg/translate-sve.c   81
-rw-r--r--  target/arm/tcg/translate.c       19
10 files changed, 228 insertions, 77 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 8c1ff16f0d..90c4fb72ce 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1187,13 +1187,21 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
bool enabled, prohibited = false, filtered;
bool secure = arm_is_secure(env);
int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
- uint8_t hpmn = mdcr_el2 & MDCR_HPMN;
+ uint64_t mdcr_el2;
+ uint8_t hpmn;
+ /*
+ * We might be called for M-profile cores where MDCR_EL2 doesn't
+ * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
+ * must be before we read that value.
+ */
if (!arm_feature(env, ARM_FEATURE_PMU)) {
return false;
}
+ mdcr_el2 = arm_mdcr_el2_eff(env);
+ hpmn = mdcr_el2 & MDCR_HPMN;
+
if (!arm_feature(env, ARM_FEATURE_EL2) ||
(counter < hpmn || counter == 31)) {
e = env->cp15.c9_pmcr & PMCRE;
@@ -9520,7 +9528,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
* AArch64 cores we might need to add a specific feature flag
* to indicate cores with "flavour 2" CBAR.
*/
- if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
/* 32 bit view is [31:18] 0...0 [43:32]. */
uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
| extract64(cpu->reset_cbar, 32, 12);
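The 32-bit CBAR view built in the hunk above packs PA[31:18] into bits [31:18] and PA[43:32] into bits [11:0], leaving [17:12] zero. As a standalone illustration of that arithmetic (plain shifts and masks instead of QEMU's extract64(), and a made-up base address), a minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the bit layout used for the 32-bit CBAR view. */
static uint32_t cbar32_view(uint64_t reset_cbar)
{
    uint32_t pa_31_18 = (uint32_t)((reset_cbar >> 18) & 0x3fff); /* PA[31:18] */
    uint32_t pa_43_32 = (uint32_t)((reset_cbar >> 32) & 0xfff);  /* PA[43:32] */
    return (pa_31_18 << 18) | pa_43_32;                          /* [17:12] stay 0 */
}

int main(void)
{
    /* Hypothetical base address chosen only for illustration. */
    uint64_t reset_cbar = 0x00000004f9000000ULL;
    printf("CBAR32 = 0x%08x\n", cbar32_view(reset_cbar)); /* prints 0xf9000004 */
    return 0;
}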
diff --git a/target/arm/internals.h b/target/arm/internals.h
index fc337fe40e..50bff44549 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1278,7 +1278,7 @@ FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
-FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */
+FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
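The narrowed SIZEM1 field reflects a bit-budget constraint: on the SVE/SME paths the whole MTE descriptor is later shifted left by SVE_MTEDESC_SHIFT before being packed into the simd_desc data field, so SIZEM1 must give up that many bits or its top bits land outside the field. A hedged sketch of that arithmetic, with placeholder constants rather than the real QEMU values:

#include <assert.h>

/* Placeholder values, NOT the real QEMU constants. */
#define EX_SIMD_DATA_BITS      22 /* assumed width of the simd_desc data field */
#define EX_SVE_MTEDESC_SHIFT    5 /* assumed left shift applied on SVE/SME paths */
#define EX_SIZEM1_START        12 /* SIZEM1 starts at bit 12 of MTEDESC */

/* New width: whatever remains after the start offset and the later shift. */
#define EX_SIZEM1_BITS \
    (EX_SIMD_DATA_BITS - EX_SVE_MTEDESC_SHIFT - EX_SIZEM1_START)

int main(void)
{
    /* The shifted field's top bit must still land inside the data field. */
    assert(EX_SIZEM1_START + EX_SIZEM1_BITS + EX_SVE_MTEDESC_SHIFT
           <= EX_SIMD_DATA_BITS);
    return 0;
}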
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 1125305115..6eb08a41b0 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -800,6 +800,111 @@ static void cortex_r5_initfn(Object *obj)
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
}
+static const ARMCPRegInfo cortex_r52_cp_reginfo[] = {
+ { .name = "CPUACTLR", .cp = 15, .opc1 = 0, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "IMP_ATCMREGIONR",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_BTCMREGIONR",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_CTCMREGIONR",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_CSCTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_BPCTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_MEMPROTCLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_SLAVEPCTLR",
+ .cp = 15, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_PERIPHREGIONR",
+ .cp = 15, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_FLASHIFREGIONR",
+ .cp = 15, .opc1 = 0, .crn = 15, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_BUILDOPTR",
+ .cp = 15, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_PINOPTR",
+ .cp = 15, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_QOSR",
+ .cp = 15, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_BUSTIMEOUTR",
+ .cp = 15, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_INTMONR",
+ .cp = 15, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 4,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_ICERR0",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_ICERR1",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_DCERR0",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_DCERR1",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_TCMERR0",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_TCMERR1",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_TCMSYNDR0",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_TCMSYNDR1",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 2, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_FLASHERR0",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 3, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_FLASHERR1",
+ .cp = 15, .opc1 = 2, .crn = 15, .crm = 3, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_CDBGDR0",
+ .cp = 15, .opc1 = 3, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_CBDGBR1",
+ .cp = 15, .opc1 = 3, .crn = 15, .crm = 0, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_TESTR0",
+ .cp = 15, .opc1 = 4, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "IMP_TESTR1",
+ .cp = 15, .opc1 = 4, .crn = 15, .crm = 0, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 },
+ { .name = "IMP_CDBGDCI",
+ .cp = 15, .opc1 = 0, .crn = 15, .crm = 15, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 },
+ { .name = "IMP_CDBGDCT",
+ .cp = 15, .opc1 = 3, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 },
+ { .name = "IMP_CDBGICT",
+ .cp = 15, .opc1 = 3, .crn = 15, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 },
+ { .name = "IMP_CDBGDCD",
+ .cp = 15, .opc1 = 3, .crn = 15, .crm = 4, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 },
+ { .name = "IMP_CDBGICD",
+ .cp = 15, .opc1 = 3, .crn = 15, .crm = 4, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP, .resetvalue = 0 },
+};
+
+
static void cortex_r52_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -809,6 +914,8 @@ static void cortex_r52_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMSA);
set_feature(&cpu->env, ARM_FEATURE_NEON);
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_AUXCR);
cpu->midr = 0x411fd133; /* r1p3 */
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034023;
@@ -839,6 +946,8 @@ static void cortex_r52_initfn(Object *obj)
cpu->pmsav7_dregion = 16;
cpu->pmsav8r_hdregion = 16;
+
+ define_arm_cp_regs(cpu, cortex_r52_cp_reginfo);
}
static void cortex_r5f_initfn(Object *obj)
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index b5ac26061c..c199b69fbf 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -570,10 +570,24 @@ static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
*/
int curmode = env->uncached_cpsr & CPSR_M;
- if (regno == 17) {
- /* ELR_Hyp: a special case because access from tgtmode is OK */
- if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
- goto undef;
+ if (tgtmode == ARM_CPU_MODE_HYP) {
+ /*
+ * Handle Hyp target regs first because some are special cases
+ * which don't want the usual "not accessible from tgtmode" check.
+ */
+ switch (regno) {
+ case 16 ... 17: /* ELR_Hyp, SPSR_Hyp */
+ if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ break;
+ case 13:
+ if (curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
return;
}
@@ -604,13 +618,6 @@ static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
}
}
- if (tgtmode == ARM_CPU_MODE_HYP) {
- /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
- if (curmode != ARM_CPU_MODE_MON) {
- goto undef;
- }
- }
-
return;
undef:
@@ -625,7 +632,12 @@ void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
switch (regno) {
case 16: /* SPSRs */
- env->banked_spsr[bank_number(tgtmode)] = value;
+ if (tgtmode == (env->uncached_cpsr & CPSR_M)) {
+ /* Only happens for SPSR_Hyp access in Hyp mode */
+ env->spsr = value;
+ } else {
+ env->banked_spsr[bank_number(tgtmode)] = value;
+ }
break;
case 17: /* ELR_Hyp */
env->elr_el[2] = value;
@@ -659,7 +671,12 @@ uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
switch (regno) {
case 16: /* SPSRs */
- return env->banked_spsr[bank_number(tgtmode)];
+ if (tgtmode == (env->uncached_cpsr & CPSR_M)) {
+ /* Only happens for SPSR_Hyp access in Hyp mode */
+ return env->spsr;
+ } else {
+ return env->banked_spsr[bank_number(tgtmode)];
+ }
case 17: /* ELR_Hyp */
return env->elr_el[2];
case 13:
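The SPSR special case above rests on a banking invariant: the SPSR of the mode the CPU is currently in lives in the live env->spsr slot, while the SPSRs of other modes sit in the banked array, so an SPSR_hyp access made from Hyp mode must use the live copy. A minimal standalone model of that rule (the mode numbering and bank layout here are invented for illustration, not QEMU's):

#include <stdint.h>
#include <stdio.h>

enum { MODE_SVC = 0, MODE_HYP = 1, MODE_MON = 2, NUM_MODES = 3 };

typedef struct {
    uint32_t current_mode;
    uint32_t spsr;                   /* SPSR of the current mode */
    uint32_t banked_spsr[NUM_MODES]; /* SPSRs of the other modes */
} ToyCPUState;

static uint32_t read_banked_spsr(ToyCPUState *cpu, uint32_t tgtmode)
{
    if (tgtmode == cpu->current_mode) {
        /* e.g. MRS of SPSR_hyp while already in Hyp mode */
        return cpu->spsr;
    }
    return cpu->banked_spsr[tgtmode];
}

int main(void)
{
    ToyCPUState cpu = { .current_mode = MODE_HYP, .spsr = 0x1d3,
                        .banked_spsr = { 0, 0xdead, 0 } };
    printf("SPSR_hyp from Hyp mode: 0x%x\n", read_banked_spsr(&cpu, MODE_HYP));
    return 0;
}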
diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c
index 1ee2690ceb..904bfdac43 100644
--- a/target/arm/tcg/sme_helper.c
+++ b/target/arm/tcg/sme_helper.c
@@ -573,8 +573,8 @@ void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg,
desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
/* Perform gross MTE suppression early. */
- if (!tbi_check(desc, bit55) ||
- tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ if (!tbi_check(mtedesc, bit55) ||
+ tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) {
mtedesc = 0;
}
@@ -750,8 +750,8 @@ void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr,
desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
/* Perform gross MTE suppression early. */
- if (!tbi_check(desc, bit55) ||
- tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ if (!tbi_check(mtedesc, bit55) ||
+ tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) {
mtedesc = 0;
}
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index bce4295d28..6853f58c19 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -5800,8 +5800,8 @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
/* Perform gross MTE suppression early. */
- if (!tbi_check(desc, bit55) ||
- tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ if (!tbi_check(mtedesc, bit55) ||
+ tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) {
mtedesc = 0;
}
@@ -6156,8 +6156,8 @@ void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr,
desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
/* Perform gross MTE suppression early. */
- if (!tbi_check(desc, bit55) ||
- tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ if (!tbi_check(mtedesc, bit55) ||
+ tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) {
mtedesc = 0;
}
@@ -6410,8 +6410,8 @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
/* Perform gross MTE suppression early. */
- if (!tbi_check(desc, bit55) ||
- tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ if (!tbi_check(mtedesc, bit55) ||
+ tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) {
mtedesc = 0;
}
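The repeated desc-to-mtedesc fix in these helpers (and in the sme_helper.c hunks above) follows from how the 32-bit descriptor is unpacked: the MTE fields are pulled out into mtedesc first, then desc is truncated to its low bits, so a later TBI/TCMA check that still reads desc is looking at bits that are no longer there. A rough sketch of that split, using a placeholder shift rather than the real SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT value:

#include <stdint.h>
#include <stdio.h>

#define EX_MTE_SHIFT 10 /* placeholder for SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT */

/* Split a packed descriptor into its low (SVE) and high (MTE) parts. */
static void split_desc(uint32_t desc, uint32_t *sve_part, uint32_t *mte_part)
{
    *mte_part = desc >> EX_MTE_SHIFT;              /* MTE fields live up here */
    *sve_part = desc & ((1u << EX_MTE_SHIFT) - 1); /* SVE data lives down here */
}

int main(void)
{
    uint32_t desc = (0x3u << EX_MTE_SHIFT) | 0x155; /* made-up packed value */
    uint32_t sve_part, mte_part;

    split_desc(desc, &sve_part, &mte_part);
    /* After the split, MTE-related checks must be fed mte_part, not desc. */
    printf("sve=0x%x mte=0x%x\n", sve_part, mte_part);
    return 0;
}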
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index 96ba39b37e..7b811b8ac5 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -28,6 +28,8 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
bool sve_access_check(DisasContext *s);
bool sme_enabled_check(DisasContext *s);
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);
+uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs,
+ uint32_t msz, bool is_write, uint32_t data);
/* This function corresponds to CheckStreamingSVEEnabled. */
static inline bool sme_sm_enabled_check(DisasContext *s)
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
index 8f0dfc884e..46c7fce8b4 100644
--- a/target/arm/tcg/translate-sme.c
+++ b/target/arm/tcg/translate-sme.c
@@ -206,7 +206,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
TCGv_ptr t_za, t_pg;
TCGv_i64 addr;
- int svl, desc = 0;
+ uint32_t desc;
bool be = s->be_data == MO_BE;
bool mte = s->mte_active[0];
@@ -224,18 +224,11 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
- if (mte) {
- desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
- desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
- desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
- desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st);
- desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1);
- desc <<= SVE_MTEDESC_SHIFT;
- } else {
+ if (!mte) {
addr = clean_data_tbi(s, addr);
}
- svl = streaming_vec_reg_size(s);
- desc = simd_desc(svl, svl, desc);
+
+ desc = make_svemte_desc(s, streaming_vec_reg_size(s), 1, a->esz, a->st, 0);
fns[a->esz][be][a->v][mte][a->st](tcg_env, t_za, t_pg, addr,
tcg_constant_i32(desc));
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 296e7d1ce2..ada05aa530 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -4437,33 +4437,47 @@ static const uint8_t dtype_esz[16] = {
3, 2, 1, 3
};
-static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
- int dtype, uint32_t mte_n, bool is_write,
- gen_helper_gvec_mem *fn)
+uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs,
+ uint32_t msz, bool is_write, uint32_t data)
{
- unsigned vsz = vec_full_reg_size(s);
- TCGv_ptr t_pg;
- int desc = 0;
+ uint32_t sizem1;
+ uint32_t desc = 0;
- /*
- * For e.g. LD4, there are not enough arguments to pass all 4
- * registers as pointers, so encode the regno into the data field.
- * For consistency, do this even for LD1.
- */
- if (s->mte_active[0]) {
- int msz = dtype_msz(dtype);
+ /* Assert all of the data fits, with or without MTE enabled. */
+ assert(nregs >= 1 && nregs <= 4);
+ sizem1 = (nregs << msz) - 1;
+ assert(sizem1 <= R_MTEDESC_SIZEM1_MASK >> R_MTEDESC_SIZEM1_SHIFT);
+ assert(data < 1u << SVE_MTEDESC_SHIFT);
+ if (s->mte_active[0]) {
desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, sizem1);
desc <<= SVE_MTEDESC_SHIFT;
- } else {
+ }
+ return simd_desc(vsz, vsz, desc | data);
+}
+
+static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
+ int dtype, uint32_t nregs, bool is_write,
+ gen_helper_gvec_mem *fn)
+{
+ TCGv_ptr t_pg;
+ uint32_t desc;
+
+ if (!s->mte_active[0]) {
addr = clean_data_tbi(s, addr);
}
- desc = simd_desc(vsz, vsz, zt | desc);
+ /*
+ * For e.g. LD4, there are not enough arguments to pass all 4
+ * registers as pointers, so encode the regno into the data field.
+ * For consistency, do this even for LD1.
+ */
+ desc = make_svemte_desc(s, vec_full_reg_size(s), nregs,
+ dtype_msz(dtype), is_write, zt);
t_pg = tcg_temp_new_ptr();
tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
@@ -4600,7 +4614,7 @@ static void do_ld_zpa(DisasContext *s, int zt, int pg,
* accessible via the instruction encoding.
*/
assert(fn != NULL);
- do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
+ do_mem_zpa(s, zt, pg, addr, dtype, nreg + 1, false, fn);
}
static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
@@ -4847,8 +4861,13 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_pg;
int poff;
+ uint32_t desc;
/* Load the first quadword using the normal predicated load helpers. */
+ if (!s->mte_active[0]) {
+ addr = clean_data_tbi(s, addr);
+ }
+
poff = pred_full_reg_offset(s, pg);
if (vsz > 16) {
/*
@@ -4872,7 +4891,8 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
gen_helper_gvec_mem *fn
= ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
- fn(tcg_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
+ desc = make_svemte_desc(s, 16, 1, dtype_msz(dtype), false, zt);
+ fn(tcg_env, t_pg, addr, tcg_constant_i32(desc));
/* Replicate that first quadword. */
if (vsz > 16) {
@@ -4915,6 +4935,7 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
unsigned vsz_r32;
TCGv_ptr t_pg;
int poff, doff;
+ uint32_t desc;
if (vsz < 32) {
/*
@@ -4927,6 +4948,9 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
}
/* Load the first octaword using the normal predicated load helpers. */
+ if (!s->mte_active[0]) {
+ addr = clean_data_tbi(s, addr);
+ }
poff = pred_full_reg_offset(s, pg);
if (vsz > 32) {
@@ -4951,7 +4975,8 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
gen_helper_gvec_mem *fn
= ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
- fn(tcg_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
+ desc = make_svemte_desc(s, 32, 1, dtype_msz(dtype), false, zt);
+ fn(tcg_env, t_pg, addr, tcg_constant_i32(desc));
/*
* Replicate that first octaword.
@@ -5168,14 +5193,13 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
if (nreg == 0) {
/* ST1 */
fn = fn_single[s->mte_active[0]][be][msz][esz];
- nreg = 1;
} else {
/* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
assert(msz == esz);
fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
}
assert(fn != NULL);
- do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
+ do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg + 1, true, fn);
}
static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
@@ -5223,25 +5247,16 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
int scale, TCGv_i64 scalar, int msz, bool is_write,
gen_helper_gvec_mem_scatter *fn)
{
- unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_zm = tcg_temp_new_ptr();
TCGv_ptr t_pg = tcg_temp_new_ptr();
TCGv_ptr t_zt = tcg_temp_new_ptr();
- int desc = 0;
-
- if (s->mte_active[0]) {
- desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
- desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
- desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
- desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
- desc <<= SVE_MTEDESC_SHIFT;
- }
- desc = simd_desc(vsz, vsz, desc | scale);
+ uint32_t desc;
tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg));
tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm));
tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt));
+
+ desc = make_svemte_desc(s, vec_full_reg_size(s), 1, msz, is_write, scale);
fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
}
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 5fa8249723..f947c62c6b 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -2822,13 +2822,20 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
break;
case ARM_CPU_MODE_HYP:
/*
- * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
- * (and so we can forbid accesses from EL2 or below). elr_hyp
- * can be accessed also from Hyp mode, so forbid accesses from
- * EL0 or EL1.
+ * r13_hyp can only be accessed from Monitor mode, and so we
+ * can forbid accesses from EL2 or below.
+ * elr_hyp can be accessed also from Hyp mode, so forbid
+ * accesses from EL0 or EL1.
+ * SPSR_hyp is supposed to be in the same category as r13_hyp
+ * and UNPREDICTABLE if accessed from anything except Monitor
+ * mode. However there is some real-world code that will do
+ * it because at least some hardware happens to permit the
+ * access. (Notably a standard Cortex-R52 startup code fragment
+ * does this.) So we permit SPSR_hyp from Hyp mode also, to allow
+ * this (incorrect) guest code to run.
*/
- if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
- (s->current_el < 3 && *regno != 17)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2
+ || (s->current_el < 3 && *regno != 16 && *regno != 17)) {
goto undef;
}
break;
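The relaxed decode condition above implements a simple rule: r13_hyp (regno 13) stays Monitor-only, while ELR_hyp (17) and now SPSR_hyp (16) are also reachable from Hyp mode itself. A standalone sketch of that rule (not the QEMU decoder; the ARM_FEATURE_EL2 check is omitted here):

#include <stdbool.h>
#include <stdio.h>

/* Banked-register numbering as in the hunk: 13 = r13_hyp, 16 = SPSR_hyp, 17 = ELR_hyp. */
static bool hyp_banked_access_ok(int current_el, int regno)
{
    if (current_el < 2) {
        return false;                      /* never from EL0/EL1 */
    }
    if (current_el == 2) {
        return regno == 16 || regno == 17; /* SPSR_hyp, ELR_hyp */
    }
    return true;                           /* EL3 (Monitor) may access all three */
}

int main(void)
{
    printf("SPSR_hyp from EL2: %d\n", hyp_banked_access_ok(2, 16)); /* 1 */
    printf("r13_hyp  from EL2: %d\n", hyp_banked_access_ok(2, 13)); /* 0 */
    return 0;
}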