| author | Stefan Hajnoczi <stefanha@redhat.com> | 2023-09-25 10:09:04 -0400 |
|---|---|---|
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2023-09-25 10:09:04 -0400 |
| commit | bf94b63d76bafd452d536c3f45cdfdefb98045dc (patch) | |
| tree | df87846b49689cd4b68ecb456e63d37a5d7b50ed /target | |
| parent | b55e4b9c0525560577384adfc6d30eb0daa8d7be (diff) | |
| parent | 231f6a7d66254a58bedbee458591b780e0a507b1 (diff) | |
Merge tag 'pull-target-arm-20230921' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
* target/m68k: Add URL to semihosting spec
* docs/devel/loads-stores: Fix git grep regexes
* hw/arm/boot: Set SCR_EL3.FGTEn when booting kernel
* linux-user: Correct SME feature names reported in cpuinfo
* linux-user: Add missing arm32 hwcaps
* Don't skip MTE checks for LDRT/STRT at EL0
* Implement FEAT_HBC
* Implement FEAT_MOPS
* audio/jackaudio: Avoid dynamic stack allocation
* sbsa-ref: add non-secure EL2 virtual timer
* elf2dmp: improve Win2022, Win11 and large dumps
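Of the changes above, the FEAT_MOPS work is the largest. The SET*/SETG* helpers it adds always use the architecture's "option A" register convention: the prologue (SETP) rewrites Xd to the final destination address and Xn to minus the remaining byte count, and the main and epilogue insns then only move Xn towards zero. A minimal standalone sketch of that register bookkeeping (not QEMU code; the start address, length, chunk size and I/O are invented for illustration):

```c
/*
 * Standalone sketch of the "option A" register convention for the SET*
 * insns (not QEMU code): after SETP, Xd holds the final address and Xn
 * holds -(bytes remaining); SETM/SETE only advance Xn towards zero.
 * The start address, length and chunk size are made-up examples.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t xd = 0x10000;   /* destination address before SETP (hypothetical) */
    uint64_t xn = 100;       /* total byte count before SETP */

    /* SETP: rewrite the registers into the option A form. */
    xd += xn;                /* final address */
    xn = -xn;                /* negative count of bytes still to set */

    /* SETM/SETE: each step sets some bytes at xd + xn and advances Xn. */
    while (xn != 0) {
        uint64_t remaining = -xn;
        uint64_t step = remaining > 64 ? 64 : remaining;  /* arbitrary chunk */

        printf("set %" PRIu64 " bytes at 0x%" PRIx64 "\n", step, xd + xn);
        xn += step;
    }
    return 0;
}
```

The real helpers additionally set NZCV to 0b0000 after the prologue so software can tell it is running on an option A implementation.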
# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmUMfwAZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3jvnD/0QE/oOxfr+wkDUkTasSwVc
# UNfhObMj3h8x2XApqXckXnckew97I7hh7OLk35p9Ncea7fb6CvGMZ/DJir7AG4aQ
# Anpd5g2Qo0AMfPIyvoJ5pgtqZ1aS/EpBfYixmjL/zY6+zNzoVzWG/KfL+XamW6ir
# 6U7EqcAUzfX0+Splcxs5WgCDI5nGtn0B42EwOMpmwsH4opfr6HTn8Rzbn9gIwKU7
# u82PaKAqWPYD0ev9NQra+VVTrrFS4SCcqkV+SoYu0Cg5vvBlgAVcx0Zz2objp9LC
# 96fOtFH4Rch611j87WiGvN+fxQawqYzAYdy2y+j0wwuonTH9G3PpdZZT0557NjeS
# rFpW2UQebDqZ3ZTDwhzefsVKc3emLZtEd+RFa/YcDtao0afKfbSHv5A2/pGHxzlv
# 8psKOOH82WXTOHwFKA2o0lXDAauzirY+1Avy0vozNzPCdErXPgMHY4tABU77PpER
# Pz17jJO9C1AGyQVF+o09ieJR2Du5Wb2LLcZP3+5Ctm0SNVmREKKNcMkhJiEM9snm
# PQBR7FNEbAuQAO2MDK70dWUcTNtOv4Q1jgTR+aYd2MrArxCmAA5Zd9gjeYDwv6XH
# n242ONDAhlG1fY5f5giE3vCrcV1FDbvHEn6GDVilgMrF3a3Iw30xUaATiO09hIfi
# XAwGwLtMsp21WDa5PsfZVw==
# =dalQ
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 21 Sep 2023 13:36:00 EDT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg: aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* tag 'pull-target-arm-20230921' of https://git.linaro.org/people/pmaydell/qemu-arm: (30 commits)
elf2dmp: rework PDB_STREAM_INDEXES::segments obtaining
elf2dmp: use Linux mmap with MAP_NORESERVE when possible
elf2dmp: introduce merging of physical memory runs
elf2dmp: introduce physical block alignment
elf2dmp: replace PE export name check with PDB name check
sbsa-ref: add non-secure EL2 virtual timer
audio/jackaudio: Avoid dynamic stack allocation in qjack_process()
audio/jackaudio: Avoid dynamic stack allocation in qjack_client_init
target/arm: Enable FEAT_MOPS for CPU 'max'
target/arm: Implement the CPY* instructions
target/arm: Implement MTE tag-checking functions for FEAT_MOPS copies
target/arm: Implement the SETG* instructions
target/arm: Define new TB flag for ATA0
target/arm: Implement the SET* instructions
target/arm: Implement MTE tag-checking functions for FEAT_MOPS
target/arm: New function allocation_tag_mem_probe()
target/arm: Define syndrome function for MOPS exceptions
target/arm: Pass unpriv bool to get_a64_user_mem_index()
target/arm: Implement FEAT_MOPS enable bits
target/arm: Don't skip MTE checks for LDRT/STRT at EL0
...
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
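The "Implement the CPY* instructions" commit above allows overlapping source and destination ranges; the prologue helper in the diff below picks the copy direction with an overlap test on the low 56 bits of the two addresses (the top byte is ignored). A hedged standalone illustration of that test, with made-up addresses:

```c
/*
 * Standalone illustration (not QEMU code) of the overlap test used to
 * pick the CPY copy direction: compare only the low 56 bits of the
 * addresses and copy backwards when the destination starts strictly
 * inside the source range.  The addresses below are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool copy_backwards(uint64_t fromaddr, uint64_t toaddr, uint64_t copysize)
{
    const uint64_t mask56 = (1ULL << 56) - 1;
    uint64_t fs = fromaddr & mask56;              /* source start */
    uint64_t ts = toaddr & mask56;                /* destination start */
    uint64_t fe = (fromaddr + copysize) & mask56; /* source end (exclusive) */

    return fs < ts && fe > ts;
}

int main(void)
{
    /* Destination overlaps the tail of the source: must copy backwards. */
    printf("%d\n", copy_backwards(0x1000, 0x1040, 0x100));  /* prints 1 */
    /* Disjoint ranges: forwards is fine. */
    printf("%d\n", copy_backwards(0x1000, 0x3000, 0x100));  /* prints 0 */
    return 0;
}
```

For non-overlapping ranges the direction is IMPDEF, and the implementation below chooses forwards.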
Diffstat (limited to 'target')
| -rw-r--r-- | target/arm/cpu.h | 35 |
| -rw-r--r-- | target/arm/helper.c | 41 |
| -rw-r--r-- | target/arm/internals.h | 55 |
| -rw-r--r-- | target/arm/syndrome.h | 12 |
| -rw-r--r-- | target/arm/tcg/a64.decode | 38 |
| -rw-r--r-- | target/arm/tcg/cpu64.c | 5 |
| -rw-r--r-- | target/arm/tcg/helper-a64.c | 878 |
| -rw-r--r-- | target/arm/tcg/helper-a64.h | 14 |
| -rw-r--r-- | target/arm/tcg/hflags.c | 21 |
| -rw-r--r-- | target/arm/tcg/mte_helper.c | 281 |
| -rw-r--r-- | target/arm/tcg/translate-a64.c | 164 |
| -rw-r--r-- | target/arm/tcg/translate.h | 4 |
| -rw-r--r-- | target/m68k/m68k-semi.c | 4 |
13 files changed, 1483 insertions, 69 deletions
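The helper-a64.c additions below repeatedly clamp each set/copy step so it never crosses a guest page boundary, via page_limit() (working forwards) and page_limit_rev() (working backwards from the last byte). A minimal standalone sketch of that arithmetic, assuming a hypothetical fixed 4 KiB page size instead of QEMU's TARGET_PAGE_* macros:

```c
/*
 * Standalone sketch of the page-limit arithmetic used by the FEAT_MOPS
 * helpers (not QEMU code).  A fixed 4 KiB page stands in for QEMU's
 * TARGET_PAGE_SIZE / TARGET_PAGE_MASK, which depend on the target.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)  (((a) + PAGE_SIZE - 1) & PAGE_MASK)

/* Bytes usable from addr forwards without crossing a page boundary. */
static uint64_t page_limit(uint64_t addr)
{
    return PAGE_ALIGN(addr + 1) - addr;
}

/* Bytes usable working backwards, addr being the last byte of the range. */
static uint64_t page_limit_rev(uint64_t addr)
{
    return (addr & ~PAGE_MASK) + 1;
}

int main(void)
{
    printf("%" PRIu64 "\n", page_limit(0x1ffd));     /* 3: bytes 0x1ffd..0x1fff */
    printf("%" PRIu64 "\n", page_limit(0x2000));     /* 4096: a whole page */
    printf("%" PRIu64 "\n", page_limit_rev(0x2002)); /* 3: bytes 0x2002..0x2000 */
    return 0;
}
```

Capping each step this way keeps the fast-path memset()/memmove() on a tlb_vaddr_to_host() pointer within a single guest page, with a one-byte slow path when no host pointer is available.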
diff --git a/target/arm/cpu.h b/target/arm/cpu.h index f2e3dc49a6..bd55c5dabf 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1315,6 +1315,7 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ #define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */ +#define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */ #define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */ #define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */ #define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */ @@ -2166,6 +2167,7 @@ FIELD(ID_AA64ISAR0, SHA1, 8, 4) FIELD(ID_AA64ISAR0, SHA2, 12, 4) FIELD(ID_AA64ISAR0, CRC32, 16, 4) FIELD(ID_AA64ISAR0, ATOMIC, 20, 4) +FIELD(ID_AA64ISAR0, TME, 24, 4) FIELD(ID_AA64ISAR0, RDM, 28, 4) FIELD(ID_AA64ISAR0, SHA3, 32, 4) FIELD(ID_AA64ISAR0, SM3, 36, 4) @@ -2200,6 +2202,13 @@ FIELD(ID_AA64ISAR2, APA3, 12, 4) FIELD(ID_AA64ISAR2, MOPS, 16, 4) FIELD(ID_AA64ISAR2, BC, 20, 4) FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4) +FIELD(ID_AA64ISAR2, CLRBHB, 28, 4) +FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4) +FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4) +FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4) +FIELD(ID_AA64ISAR2, RPRFM, 48, 4) +FIELD(ID_AA64ISAR2, CSSC, 52, 4) +FIELD(ID_AA64ISAR2, ATS1A, 60, 4) FIELD(ID_AA64PFR0, EL0, 0, 4) FIELD(ID_AA64PFR0, EL1, 4, 4) @@ -2227,6 +2236,12 @@ FIELD(ID_AA64PFR1, SME, 24, 4) FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4) FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4) FIELD(ID_AA64PFR1, NMI, 36, 4) +FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4) +FIELD(ID_AA64PFR1, GCS, 44, 4) +FIELD(ID_AA64PFR1, THE, 48, 4) +FIELD(ID_AA64PFR1, MTEX, 52, 4) +FIELD(ID_AA64PFR1, DF2, 56, 4) +FIELD(ID_AA64PFR1, PFAR, 60, 4) FIELD(ID_AA64MMFR0, PARANGE, 0, 4) FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) @@ -2258,6 +2273,7 @@ FIELD(ID_AA64MMFR1, AFP, 44, 4) FIELD(ID_AA64MMFR1, NTLBPA, 48, 4) FIELD(ID_AA64MMFR1, TIDCP1, 52, 4) FIELD(ID_AA64MMFR1, CMOW, 56, 4) +FIELD(ID_AA64MMFR1, ECBHB, 60, 4) FIELD(ID_AA64MMFR2, CNP, 0, 4) FIELD(ID_AA64MMFR2, UAO, 4, 4) @@ -2279,7 +2295,9 @@ FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) FIELD(ID_AA64DFR0, TRACEVER, 4, 4) FIELD(ID_AA64DFR0, PMUVER, 8, 4) FIELD(ID_AA64DFR0, BRPS, 12, 4) +FIELD(ID_AA64DFR0, PMSS, 16, 4) FIELD(ID_AA64DFR0, WRPS, 20, 4) +FIELD(ID_AA64DFR0, SEBEP, 24, 4) FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) FIELD(ID_AA64DFR0, PMSVER, 32, 4) FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) @@ -2287,12 +2305,14 @@ FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4) FIELD(ID_AA64DFR0, MTPMU, 48, 4) FIELD(ID_AA64DFR0, BRBE, 52, 4) +FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4) FIELD(ID_AA64DFR0, HPMN0, 60, 4) FIELD(ID_AA64ZFR0, SVEVER, 0, 4) FIELD(ID_AA64ZFR0, AES, 4, 4) FIELD(ID_AA64ZFR0, BITPERM, 16, 4) FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4) +FIELD(ID_AA64ZFR0, B16B16, 24, 4) FIELD(ID_AA64ZFR0, SHA3, 32, 4) FIELD(ID_AA64ZFR0, SM4, 40, 4) FIELD(ID_AA64ZFR0, I8MM, 44, 4) @@ -2300,9 +2320,13 @@ FIELD(ID_AA64ZFR0, F32MM, 52, 4) FIELD(ID_AA64ZFR0, F64MM, 56, 4) FIELD(ID_AA64SMFR0, F32F32, 32, 1) +FIELD(ID_AA64SMFR0, BI32I32, 33, 1) FIELD(ID_AA64SMFR0, B16F32, 34, 1) FIELD(ID_AA64SMFR0, F16F32, 35, 1) FIELD(ID_AA64SMFR0, I8I32, 36, 4) +FIELD(ID_AA64SMFR0, F16F16, 42, 1) +FIELD(ID_AA64SMFR0, B16B16, 43, 1) +FIELD(ID_AA64SMFR0, I16I32, 44, 4) FIELD(ID_AA64SMFR0, F64F64, 48, 1) FIELD(ID_AA64SMFR0, I16I64, 52, 4) FIELD(ID_AA64SMFR0, SMEVER, 56, 4) @@ -3147,6 +3171,7 @@ FIELD(TBFLAG_A64, SVL, 24, 4) FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) FIELD(TBFLAG_A64, FGT_ERET, 29, 1) FIELD(TBFLAG_A64, NAA, 30, 1) +FIELD(TBFLAG_A64, ATA0, 31, 1) /* * Helpers for using the 
above. @@ -4065,6 +4090,11 @@ static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; } +static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0; +} + static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) { return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1; @@ -4253,6 +4283,11 @@ static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; } +static inline bool isar_feature_aa64_mops(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS); +} + /* * Feature tests for "does this exist in either 32-bit or 64-bit?" */ diff --git a/target/arm/helper.c b/target/arm/helper.c index 3b22596eab..83620787b4 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -5980,7 +5980,10 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, { uint64_t valid_mask = 0; - /* No features adding bits to HCRX are implemented. */ + /* FEAT_MOPS adds MSCEn and MCE2 */ + if (cpu_isar_feature(aa64_mops, env_archcpu(env))) { + valid_mask |= HCRX_MSCEN | HCRX_MCE2; + } /* Clear RES0 bits. */ env->cp15.hcrx_el2 = value & valid_mask; @@ -6009,13 +6012,24 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env) { /* * The bits in this register behave as 0 for all purposes other than - * direct reads of the register if: - * - EL2 is not enabled in the current security state, - * - SCR_EL3.HXEn is 0. - */ - if (!arm_is_el2_enabled(env) - || (arm_feature(env, ARM_FEATURE_EL3) - && !(env->cp15.scr_el3 & SCR_HXEN))) { + * direct reads of the register if SCR_EL3.HXEn is 0. + * If EL2 is not enabled in the current security state, then the + * bit may behave as if 0, or as if 1, depending on the bit. + * For the moment, we treat the EL2-disabled case as taking + * priority over the HXEn-disabled case. This is true for the only + * bit for a feature which we implement where the answer is different + * for the two cases (MSCEn for FEAT_MOPS). + * This may need to be revisited for future bits. 
+ */ + if (!arm_is_el2_enabled(env)) { + uint64_t hcrx = 0; + if (cpu_isar_feature(aa64_mops, env_archcpu(env))) { + /* MSCEn behaves as 1 if EL2 is not enabled */ + hcrx |= HCRX_MSCEN; + } + return hcrx; + } + if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { return 0; } return env->cp15.hcrx_el2; @@ -8621,11 +8635,16 @@ void register_cp_regs_for_features(ARMCPU *cpu) R_ID_AA64ZFR0_F64MM_MASK }, { .name = "ID_AA64SMFR0_EL1", .exported_bits = R_ID_AA64SMFR0_F32F32_MASK | + R_ID_AA64SMFR0_BI32I32_MASK | R_ID_AA64SMFR0_B16F32_MASK | R_ID_AA64SMFR0_F16F32_MASK | R_ID_AA64SMFR0_I8I32_MASK | + R_ID_AA64SMFR0_F16F16_MASK | + R_ID_AA64SMFR0_B16B16_MASK | + R_ID_AA64SMFR0_I16I32_MASK | R_ID_AA64SMFR0_F64F64_MASK | R_ID_AA64SMFR0_I16I64_MASK | + R_ID_AA64SMFR0_SMEVER_MASK | R_ID_AA64SMFR0_FA64_MASK }, { .name = "ID_AA64MMFR0_EL1", .exported_bits = R_ID_AA64MMFR0_ECV_MASK, @@ -8676,7 +8695,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .exported_bits = R_ID_AA64ISAR2_WFXT_MASK | R_ID_AA64ISAR2_RPRES_MASK | R_ID_AA64ISAR2_GPA3_MASK | - R_ID_AA64ISAR2_APA3_MASK }, + R_ID_AA64ISAR2_APA3_MASK | + R_ID_AA64ISAR2_MOPS_MASK | + R_ID_AA64ISAR2_BC_MASK | + R_ID_AA64ISAR2_RPRFM_MASK | + R_ID_AA64ISAR2_CSSC_MASK }, { .name = "ID_AA64ISAR*_EL1_RESERVED", .is_glob = true }, }; diff --git a/target/arm/internals.h b/target/arm/internals.h index 5f5393b25c..1dd9182a54 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1272,6 +1272,61 @@ FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */ bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr); uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); +/** + * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation + * @env: CPU env + * @ptr: start address of memory region (dirty pointer) + * @size: length of region (guaranteed not to cross a page boundary) + * @desc: MTEDESC descriptor word (0 means no MTE checks) + * Returns: the size of the region that can be copied without hitting + * an MTE tag failure + * + * Note that we assume that the caller has already checked the TBI + * and TCMA bits with mte_checks_needed() and an MTE check is definitely + * required. + */ +uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc); + +/** + * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS + * operation going in the reverse direction + * @env: CPU env + * @ptr: *end* address of memory region (dirty pointer) + * @size: length of region (guaranteed not to cross a page boundary) + * @desc: MTEDESC descriptor word (0 means no MTE checks) + * Returns: the size of the region that can be copied without hitting + * an MTE tag failure + * + * Note that we assume that the caller has already checked the TBI + * and TCMA bits with mte_checks_needed() and an MTE check is definitely + * required. + */ +uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc); + +/** + * mte_check_fail: Record an MTE tag check failure + * @env: CPU env + * @desc: MTEDESC descriptor word + * @dirty_ptr: Failing dirty address + * @ra: TCG retaddr + * + * This may never return (if the MTE tag checks are configured to fault). 
+ */ +void mte_check_fail(CPUARMState *env, uint32_t desc, + uint64_t dirty_ptr, uintptr_t ra); + +/** + * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation + * @env: CPU env + * @dirty_ptr: Start address of memory region (dirty pointer) + * @size: length of region (guaranteed not to cross page boundary) + * @desc: MTEDESC descriptor word + */ +void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size, + uint32_t desc); + static inline int allocation_tag_from_addr(uint64_t ptr) { return extract64(ptr, 56, 4); diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index 8a6b8f8162..5d34755508 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -58,6 +58,7 @@ enum arm_exception_class { EC_DATAABORT = 0x24, EC_DATAABORT_SAME_EL = 0x25, EC_SPALIGNMENT = 0x26, + EC_MOP = 0x27, EC_AA32_FPTRAP = 0x28, EC_AA64_FPTRAP = 0x2c, EC_SERROR = 0x2f, @@ -334,4 +335,15 @@ static inline uint32_t syn_serror(uint32_t extra) return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra; } +static inline uint32_t syn_mop(bool is_set, bool is_setg, int options, + bool epilogue, bool wrong_option, bool option_a, + int destreg, int srcreg, int sizereg) +{ + return (EC_MOP << ARM_EL_EC_SHIFT) | ARM_EL_IL | + (is_set << 24) | (is_setg << 23) | (options << 19) | + (epilogue << 18) | (wrong_option << 17) | (option_a << 16) | + (destreg << 10) | (srcreg << 5) | sizereg; +} + + #endif /* TARGET_ARM_SYNDROME_H */ diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode index ef64a3f9cb..0cf1147074 100644 --- a/target/arm/tcg/a64.decode +++ b/target/arm/tcg/a64.decode @@ -126,7 +126,8 @@ CBZ sf:1 011010 nz:1 ................... rt:5 &cbz imm=%imm19 TBZ . 011011 nz:1 ..... .............. rt:5 &tbz imm=%imm14 bitpos=%imm31_19 -B_cond 0101010 0 ................... 0 cond:4 imm=%imm19 +# B.cond and BC.cond +B_cond 0101010 0 ................... c:1 cond:4 imm=%imm19 BR 1101011 0000 11111 000000 rn:5 00000 &r BLR 1101011 0001 11111 000000 rn:5 00000 &r @@ -553,3 +554,38 @@ LDGM 11011001 11 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0 STZ2G 11011001 11 1 ......... 01 ..... ..... @ldst_tag p=1 w=1 STZ2G 11011001 11 1 ......... 10 ..... ..... @ldst_tag p=0 w=0 STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1 + +# Memory operations (memset, memcpy, memmove) +# Each of these comes in a set of three, eg SETP (prologue), SETM (main), +# SETE (epilogue), and each of those has different flavours to +# indicate whether memory accesses should be unpriv or non-temporal. +# We don't distinguish temporal and non-temporal accesses, but we +# do need to report it in syndrome register values. + +# Memset +&set rs rn rd unpriv nontemp +# op2 bit 1 is nontemporal bit +@set .. ......... rs:5 .. nontemp:1 unpriv:1 .. rn:5 rd:5 &set + +SETP 00 011001110 ..... 00 . . 01 ..... ..... @set +SETM 00 011001110 ..... 01 . . 01 ..... ..... @set +SETE 00 011001110 ..... 10 . . 01 ..... ..... @set + +# Like SET, but also setting MTE tags +SETGP 00 011101110 ..... 00 . . 01 ..... ..... @set +SETGM 00 011101110 ..... 01 . . 01 ..... ..... @set +SETGE 00 011101110 ..... 10 . . 01 ..... ..... @set + +# Memmove/Memcopy: the CPY insns allow overlapping src/dest and +# copy in the correct direction; the CPYF insns always copy forwards. +# +# options has the nontemporal and unpriv bits for src and dest +&cpy rs rn rd options +@cpy .. ... . ..... rs:5 options:4 .. rn:5 rd:5 &cpy + +CPYFP 00 011 0 01000 ..... .... 01 ..... ..... @cpy +CPYFM 00 011 0 01010 ..... .... 01 ..... ..... 
@cpy +CPYFE 00 011 0 01100 ..... .... 01 ..... ..... @cpy +CPYP 00 011 1 01000 ..... .... 01 ..... ..... @cpy +CPYM 00 011 1 01010 ..... .... 01 ..... ..... @cpy +CPYE 00 011 1 01100 ..... .... 01 ..... ..... @cpy diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index 7264ab5ead..68928e5127 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -1027,6 +1027,11 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */ cpu->isar.id_aa64isar1 = t; + t = cpu->isar.id_aa64isar2; + t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */ + t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */ + cpu->isar.id_aa64isar2 = t; + t = cpu->isar.id_aa64pfr0; t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */ diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 0cf56f6dc4..84f54750fc 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -968,3 +968,881 @@ void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr, arm_cpu_do_unaligned_access(env_cpu(env), addr, access_type, mmu_idx, GETPC()); } + +/* Memory operations (memset, memmove, memcpy) */ + +/* + * Return true if the CPY* and SET* insns can execute; compare + * pseudocode CheckMOPSEnabled(), though we refactor it a little. + */ +static bool mops_enabled(CPUARMState *env) +{ + int el = arm_current_el(env); + + if (el < 2 && + (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) && + !(arm_hcrx_el2_eff(env) & HCRX_MSCEN)) { + return false; + } + + if (el == 0) { + if (!el_is_in_host(env, 0)) { + return env->cp15.sctlr_el[1] & SCTLR_MSCEN; + } else { + return env->cp15.sctlr_el[2] & SCTLR_MSCEN; + } + } + return true; +} + +static void check_mops_enabled(CPUARMState *env, uintptr_t ra) +{ + if (!mops_enabled(env)) { + raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(), + exception_target_el(env), ra); + } +} + +/* + * Return the target exception level for an exception due + * to mismatched arguments in a FEAT_MOPS copy or set. + * Compare pseudocode MismatchedCpySetTargetEL() + */ +static int mops_mismatch_exception_target_el(CPUARMState *env) +{ + int el = arm_current_el(env); + + if (el > 1) { + return el; + } + if (el == 0 && (arm_hcr_el2_eff(env) & HCR_TGE)) { + return 2; + } + if (el == 1 && (arm_hcrx_el2_eff(env) & HCRX_MCE2)) { + return 2; + } + return 1; +} + +/* + * Check whether an M or E instruction was executed with a CF value + * indicating the wrong option for this implementation. + * Assumes we are always Option A. + */ +static void check_mops_wrong_option(CPUARMState *env, uint32_t syndrome, + uintptr_t ra) +{ + if (env->CF != 0) { + syndrome |= 1 << 17; /* Set the wrong-option bit */ + raise_exception_ra(env, EXCP_UDEF, syndrome, + mops_mismatch_exception_target_el(env), ra); + } +} + +/* + * Return the maximum number of bytes we can transfer starting at addr + * without crossing a page boundary. + */ +static uint64_t page_limit(uint64_t addr) +{ + return TARGET_PAGE_ALIGN(addr + 1) - addr; +} + +/* + * Return the number of bytes we can copy starting from addr and working + * backwards without crossing a page boundary. + */ +static uint64_t page_limit_rev(uint64_t addr) +{ + return (addr & ~TARGET_PAGE_MASK) + 1; +} + +/* + * Perform part of a memory set on an area of guest memory starting at + * toaddr (a dirty address) and extending for setsize bytes. 
+ * + * Returns the number of bytes actually set, which might be less than + * setsize; the caller should loop until the whole set has been done. + * The caller should ensure that the guest registers are correct + * for the possibility that the first byte of the set encounters + * an exception or watchpoint. We guarantee not to take any faults + * for bytes other than the first. + */ +static uint64_t set_step(CPUARMState *env, uint64_t toaddr, + uint64_t setsize, uint32_t data, int memidx, + uint32_t *mtedesc, uintptr_t ra) +{ + void *mem; + + setsize = MIN(setsize, page_limit(toaddr)); + if (*mtedesc) { + uint64_t mtesize = mte_mops_probe(env, toaddr, setsize, *mtedesc); + if (mtesize == 0) { + /* Trap, or not. All CPU state is up to date */ + mte_check_fail(env, *mtedesc, toaddr, ra); + /* Continue, with no further MTE checks required */ + *mtedesc = 0; + } else { + /* Advance to the end, or to the tag mismatch */ + setsize = MIN(setsize, mtesize); + } + } + + toaddr = useronly_clean_ptr(toaddr); + /* + * Trapless lookup: returns NULL for invalid page, I/O, + * watchpoints, clean pages, etc. + */ + mem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, memidx); + +#ifndef CONFIG_USER_ONLY + if (unlikely(!mem)) { + /* + * Slow-path: just do one byte write. This will handle the + * watchpoint, invalid page, etc handling correctly. + * For clean code pages, the next iteration will see + * the page dirty and will use the fast path. + */ + cpu_stb_mmuidx_ra(env, toaddr, data, memidx, ra); + return 1; + } +#endif + /* Easy case: just memset the host memory */ + memset(mem, data, setsize); + return setsize; +} + +/* + * Similar, but setting tags. The architecture requires us to do this + * in 16-byte chunks. SETP accesses are not tag checked; they set + * the tags. + */ +static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr, + uint64_t setsize, uint32_t data, int memidx, + uint32_t *mtedesc, uintptr_t ra) +{ + void *mem; + uint64_t cleanaddr; + + setsize = MIN(setsize, page_limit(toaddr)); + + cleanaddr = useronly_clean_ptr(toaddr); + /* + * Trapless lookup: returns NULL for invalid page, I/O, + * watchpoints, clean pages, etc. + */ + mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx); + +#ifndef CONFIG_USER_ONLY + if (unlikely(!mem)) { + /* + * Slow-path: just do one write. This will handle the + * watchpoint, invalid page, etc handling correctly. + * The architecture requires that we do 16 bytes at a time, + * and we know both ptr and size are 16 byte aligned. + * For clean code pages, the next iteration will see + * the page dirty and will use the fast path. 
+ */ + uint64_t repldata = data * 0x0101010101010101ULL; + MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx); + cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra); + mte_mops_set_tags(env, toaddr, 16, *mtedesc); + return 16; + } +#endif + /* Easy case: just memset the host memory */ + memset(mem, data, setsize); + mte_mops_set_tags(env, toaddr, setsize, *mtedesc); + return setsize; +} + +typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr, + uint64_t setsize, uint32_t data, + int memidx, uint32_t *mtedesc, uintptr_t ra); + +/* Extract register numbers from a MOPS exception syndrome value */ +static int mops_destreg(uint32_t syndrome) +{ + return extract32(syndrome, 10, 5); +} + +static int mops_srcreg(uint32_t syndrome) +{ + return extract32(syndrome, 5, 5); +} + +static int mops_sizereg(uint32_t syndrome) +{ + return extract32(syndrome, 0, 5); +} + +/* + * Return true if TCMA and TBI bits mean we need to do MTE checks. + * We only need to do this once per MOPS insn, not for every page. + */ +static bool mte_checks_needed(uint64_t ptr, uint32_t desc) +{ + int bit55 = extract64(ptr, 55, 1); + + /* + * Note that tbi_check() returns true for "access checked" but + * tcma_check() returns true for "access unchecked". + */ + if (!tbi_check(desc, bit55)) { + return false; + } + return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr)); +} + +/* Take an exception if the SETG addr/size are not granule aligned */ +static void check_setg_alignment(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t memidx, uintptr_t ra) +{ + if ((size != 0 && !QEMU_IS_ALIGNED(ptr, TAG_GRANULE)) || + !QEMU_IS_ALIGNED(size, TAG_GRANULE)) { + arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE, + memidx, ra); + + } +} + +/* + * For the Memory Set operation, our implementation chooses + * always to use "option A", where we update Xd to the final + * address in the SETP insn, and set Xn to be -(bytes remaining). + * On SETM and SETE insns we only need update Xn. 
+ * + * @env: CPU + * @syndrome: syndrome value for mismatch exceptions + * (also contains the register numbers we need to use) + * @mtedesc: MTE descriptor word + * @stepfn: function which does a single part of the set operation + * @is_setg: true if this is the tag-setting SETG variant + */ +static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, + StepFn *stepfn, bool is_setg, uintptr_t ra) +{ + /* Prologue: we choose to do up to the next page boundary */ + int rd = mops_destreg(syndrome); + int rs = mops_srcreg(syndrome); + int rn = mops_sizereg(syndrome); + uint8_t data = env->xregs[rs]; + uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX); + uint64_t toaddr = env->xregs[rd]; + uint64_t setsize = env->xregs[rn]; + uint64_t stagesetsize, step; + + check_mops_enabled(env, ra); + + if (setsize > INT64_MAX) { + setsize = INT64_MAX; + if (is_setg) { + setsize &= ~0xf; + } + } + + if (unlikely(is_setg)) { + check_setg_alignment(env, toaddr, setsize, memidx, ra); + } else if (!mte_checks_needed(toaddr, mtedesc)) { + mtedesc = 0; + } + + stagesetsize = MIN(setsize, page_limit(toaddr)); + while (stagesetsize) { + env->xregs[rd] = toaddr; + env->xregs[rn] = setsize; + step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra); + toaddr += step; + setsize -= step; + stagesetsize -= step; + } + /* Insn completed, so update registers to the Option A format */ + env->xregs[rd] = toaddr + setsize; + env->xregs[rn] = -setsize; + + /* Set NZCV = 0000 to indicate we are an Option A implementation */ + env->NF = 0; + env->ZF = 1; /* our env->ZF encoding is inverted */ + env->CF = 0; + env->VF = 0; + return; +} + +void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) +{ + do_setp(env, syndrome, mtedesc, set_step, false, GETPC()); +} + +void HELPER(setgp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) +{ + do_setp(env, syndrome, mtedesc, set_step_tags, true, GETPC()); +} + +static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, + StepFn *stepfn, bool is_setg, uintptr_t ra) +{ + /* Main: we choose to do all the full-page chunks */ + CPUState *cs = env_cpu(env); + int rd = mops_destreg(syndrome); + int rs = mops_srcreg(syndrome); + int rn = mops_sizereg(syndrome); + uint8_t data = env->xregs[rs]; + uint64_t toaddr = env->xregs[rd] + env->xregs[rn]; + uint64_t setsize = -env->xregs[rn]; + uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX); + uint64_t step, stagesetsize; + + check_mops_enabled(env, ra); + + /* + * We're allowed to NOP out "no data to copy" before the consistency + * checks; we choose to do so. + */ + if (env->xregs[rn] == 0) { + return; + } + + check_mops_wrong_option(env, syndrome, ra); + + /* + * Our implementation will work fine even if we have an unaligned + * destination address, and because we update Xn every time around + * the loop below and the return value from stepfn() may be less + * than requested, we might find toaddr is unaligned. So we don't + * have an IMPDEF check for alignment here. 
+ */ + + if (unlikely(is_setg)) { + check_setg_alignment(env, toaddr, setsize, memidx, ra); + } else if (!mte_checks_needed(toaddr, mtedesc)) { + mtedesc = 0; + } + + /* Do the actual memset: we leave the last partial page to SETE */ + stagesetsize = setsize & TARGET_PAGE_MASK; + while (stagesetsize > 0) { + step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra); + toaddr += step; + setsize -= step; + stagesetsize -= step; + env->xregs[rn] = -setsize; + if (stagesetsize > 0 && unlikely(cpu_loop_exit_requested(cs))) { + cpu_loop_exit_restore(cs, ra); + } + } +} + +void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) +{ + do_setm(env, syndrome, mtedesc, set_step, false, GETPC()); +} + +void HELPER(setgm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) +{ + do_setm(env, syndrome, mtedesc, set_step_tags, true, GETPC()); +} + +static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, + StepFn *stepfn, bool is_setg, uintptr_t ra) +{ + /* Epilogue: do the last partial page */ + int rd = mops_destreg(syndrome); + int rs = mops_srcreg(syndrome); + int rn = mops_sizereg(syndrome); + uint8_t data = env->xregs[rs]; + uint64_t toaddr = env->xregs[rd] + env->xregs[rn]; + uint64_t setsize = -env->xregs[rn]; + uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX); + uint64_t step; + + check_mops_enabled(env, ra); + + /* + * We're allowed to NOP out "no data to copy" before the consistency + * checks; we choose to do so. + */ + if (setsize == 0) { + return; + } + + check_mops_wrong_option(env, syndrome, ra); + + /* + * Our implementation has no address alignment requirements, but + * we do want to enforce the "less than a page" size requirement, + * so we don't need to have the "check for interrupts" here. + */ + if (setsize >= TARGET_PAGE_SIZE) { + raise_exception_ra(env, EXCP_UDEF, syndrome, + mops_mismatch_exception_target_el(env), ra); + } + + if (unlikely(is_setg)) { + check_setg_alignment(env, toaddr, setsize, memidx, ra); + } else if (!mte_checks_needed(toaddr, mtedesc)) { + mtedesc = 0; + } + + /* Do the actual memset */ + while (setsize > 0) { + step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra); + toaddr += step; + setsize -= step; + env->xregs[rn] = -setsize; + } +} + +void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) +{ + do_sete(env, syndrome, mtedesc, set_step, false, GETPC()); +} + +void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) +{ + do_sete(env, syndrome, mtedesc, set_step_tags, true, GETPC()); +} + +/* + * Perform part of a memory copy from the guest memory at fromaddr + * and extending for copysize bytes, to the guest memory at + * toaddr. Both addreses are dirty. + * + * Returns the number of bytes actually set, which might be less than + * copysize; the caller should loop until the whole copy has been done. + * The caller should ensure that the guest registers are correct + * for the possibility that the first byte of the copy encounters + * an exception or watchpoint. We guarantee not to take any faults + * for bytes other than the first. 
+ */ +static uint64_t copy_step(CPUARMState *env, uint64_t toaddr, uint64_t fromaddr, + uint64_t copysize, int wmemidx, int rmemidx, + uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra) +{ + void *rmem; + void *wmem; + + /* Don't cross a page boundary on either source or destination */ + copysize = MIN(copysize, page_limit(toaddr)); + copysize = MIN(copysize, page_limit(fromaddr)); + /* + * Handle MTE tag checks: either handle the tag mismatch for byte 0, + * or else copy up to but not including the byte with the mismatch. + */ + if (*rdesc) { + uint64_t mtesize = mte_mops_probe(env, fromaddr, copysize, *rdesc); + if (mtesize == 0) { + mte_check_fail(env, *rdesc, fromaddr, ra); + *rdesc = 0; + } else { + copysize = MIN(copysize, mtesize); + } + } + if (*wdesc) { + uint64_t mtesize = mte_mops_probe(env, toaddr, copysize, *wdesc); + if (mtesize == 0) { + mte_check_fail(env, *wdesc, toaddr, ra); + *wdesc = 0; + } else { + copysize = MIN(copysize, mtesize); + } + } + + toaddr = useronly_clean_ptr(toaddr); + fromaddr = useronly_clean_ptr(fromaddr); + /* Trapless lookup of whether we can get a host memory pointer */ + wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx); + rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx); + +#ifndef CONFIG_USER_ONLY + /* + * If we don't have host memory for both source and dest then just + * do a single byte copy. This will handle watchpoints, invalid pages, + * etc correctly. For clean code pages, the next iteration will see + * the page dirty and will use the fast path. + */ + if (unlikely(!rmem || !wmem)) { + uint8_t byte; + if (rmem) { + byte = *(uint8_t *)rmem; + } else { + byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra); + } + if (wmem) { + *(uint8_t *)wmem = byte; + } else { + cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra); + } + return 1; + } +#endif + /* Easy case: just memmove the host memory */ + memmove(wmem, rmem, copysize); + return copysize; +} + +/* + * Do part of a backwards memory copy. Here toaddr and fromaddr point + * to the *last* byte to be copied. + */ +static uint64_t copy_step_rev(CPUARMState *env, uint64_t toaddr, + uint64_t fromaddr, + uint64_t copysize, int wmemidx, int rmemidx, + uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra) +{ + void *rmem; + void *wmem; + + /* Don't cross a page boundary on either source or destination */ + copysize = MIN(copysize, page_limit_rev(toaddr)); + copysize = MIN(copysize, page_limit_rev(fromaddr)); + + /* + * Handle MTE tag checks: either handle the tag mismatch for byte 0, + * or else copy up to but not including the byte with the mismatch. + */ + if (*rdesc) { + uint64_t mtesize = mte_mops_probe_rev(env, fromaddr, copysize, *rdesc); + if (mtesize == 0) { + mte_check_fail(env, *rdesc, fromaddr, ra); + *rdesc = 0; + } else { + copysize = MIN(copysize, mtesize); + } + } + if (*wdesc) { + uint64_t mtesize = mte_mops_probe_rev(env, toaddr, copysize, *wdesc); + if (mtesize == 0) { + mte_check_fail(env, *wdesc, toaddr, ra); + *wdesc = 0; + } else { + copysize = MIN(copysize, mtesize); + } + } + + toaddr = useronly_clean_ptr(toaddr); + fromaddr = useronly_clean_ptr(fromaddr); + /* Trapless lookup of whether we can get a host memory pointer */ + wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx); + rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx); + +#ifndef CONFIG_USER_ONLY + /* + * If we don't have host memory for both source and dest then just + * do a single byte copy. This will handle watchpoints, invalid pages, + * etc correctly. 
For clean code pages, the next iteration will see + * the page dirty and will use the fast path. + */ + if (unlikely(!rmem || !wmem)) { + uint8_t byte; + if (rmem) { + byte = *(uint8_t *)rmem; + } else { + byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra); + } + if (wmem) { + *(uint8_t *)wmem = byte; + } else { + cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra); + } + return 1; + } +#endif + /* + * Easy case: just memmove the host memory. Note that wmem and + * rmem here point to the *last* byte to copy. + */ + memmove(wmem - (copysize - 1), rmem - (copysize - 1), copysize); + return copysize; +} + +/* + * for the Memory Copy operation, our implementation chooses always + * to use "option A", where we update Xd and Xs to the final addresses + * in the CPYP insn, and then in CPYM and CPYE only need to update Xn. + * + * @env: CPU + * @syndrome: syndrome value for mismatch exceptions + * (also contains the register numbers we need to use) + * @wdesc: MTE descriptor for the writes (destination) + * @rdesc: MTE descriptor for the reads (source) + * @move: true if this is CPY (memmove), false for CPYF (memcpy forwards) + */ +static void do_cpyp(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc, uint32_t move, uintptr_t ra) +{ + int rd = mops_destreg(syndrome); + int rs = mops_srcreg(syndrome); + int rn = mops_sizereg(syndrome); + uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX); + uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX); + bool forwards = true; + uint64_t toaddr = env->xregs[rd]; + uint64_t fromaddr = env->xregs[rs]; + uint64_t copysize = env->xregs[rn]; + uint64_t stagecopysize, step; + + check_mops_enabled(env, ra); + + + if (move) { + /* + * Copy backwards if necessary. The direction for a non-overlapping + * copy is IMPDEF; we choose forwards. + */ + if (copysize > 0x007FFFFFFFFFFFFFULL) { + copysize = 0x007FFFFFFFFFFFFFULL; + } + uint64_t fs = extract64(fromaddr, 0, 56); + uint64_t ts = extract64(toaddr, 0, 56); + uint64_t fe = extract64(fromaddr + copysize, 0, 56); + + if (fs < ts && fe > ts) { + forwards = false; + } + } else { + if (copysize > INT64_MAX) { + copysize = INT64_MAX; + } + } + + if (!mte_checks_needed(fromaddr, rdesc)) { + rdesc = 0; + } + if (!mte_checks_needed(toaddr, wdesc)) { + wdesc = 0; + } + + if (forwards) { + stagecopysize = MIN(copysize, page_limit(toaddr)); + stagecopysize = MIN(stagecopysize, page_limit(fromaddr)); + while (stagecopysize) { + env->xregs[rd] = toaddr; + env->xregs[rs] = fromaddr; + env->xregs[rn] = copysize; + step = copy_step(env, toaddr, fromaddr, stagecopysize, + wmemidx, rmemidx, &wdesc, &rdesc, ra); + toaddr += step; + fromaddr += step; + copysize -= step; + stagecopysize -= step; + } + /* Insn completed, so update registers to the Option A format */ + env->xregs[rd] = toaddr + copysize; + env->xregs[rs] = fromaddr + copysize; + env->xregs[rn] = -copysize; + } else { + /* + * In a reverse copy the to and from addrs in Xs and Xd are the start + * of the range, but it's more convenient for us to work with pointers + * to the last byte being copied. 
+ */ + toaddr += copysize - 1; + fromaddr += copysize - 1; + stagecopysize = MIN(copysize, page_limit_rev(toaddr)); + stagecopysize = MIN(stagecopysize, page_limit_rev(fromaddr)); + while (stagecopysize) { + env->xregs[rn] = copysize; + step = copy_step_rev(env, toaddr, fromaddr, stagecopysize, + wmemidx, rmemidx, &wdesc, &rdesc, ra); + copysize -= step; + stagecopysize -= step; + toaddr -= step; + fromaddr -= step; + } + /* + * Insn completed, so update registers to the Option A format. + * For a reverse copy this is no different to the CPYP input format. + */ + env->xregs[rn] = copysize; + } + + /* Set NZCV = 0000 to indicate we are an Option A implementation */ + env->NF = 0; + env->ZF = 1; /* our env->ZF encoding is inverted */ + env->CF = 0; + env->VF = 0; + return; +} + +void HELPER(cpyp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc) +{ + do_cpyp(env, syndrome, wdesc, rdesc, true, GETPC()); +} + +void HELPER(cpyfp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc) +{ + do_cpyp(env, syndrome, wdesc, rdesc, false, GETPC()); +} + +static void do_cpym(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc, uint32_t move, uintptr_t ra) +{ + /* Main: we choose to copy until less than a page remaining */ + CPUState *cs = env_cpu(env); + int rd = mops_destreg(syndrome); + int rs = mops_srcreg(syndrome); + int rn = mops_sizereg(syndrome); + uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX); + uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX); + bool forwards = true; + uint64_t toaddr, fromaddr, copysize, step; + + check_mops_enabled(env, ra); + + /* We choose to NOP out "no data to copy" before consistency checks */ + if (env->xregs[rn] == 0) { + return; + } + + check_mops_wrong_option(env, syndrome, ra); + + if (move) { + forwards = (int64_t)env->xregs[rn] < 0; + } + + if (forwards) { + toaddr = env->xregs[rd] + env->xregs[rn]; + fromaddr = env->xregs[rs] + env->xregs[rn]; + copysize = -env->xregs[rn]; + } else { + copysize = env->xregs[rn]; + /* This toaddr and fromaddr point to the *last* byte to copy */ + toaddr = env->xregs[rd] + copysize - 1; + fromaddr = env->xregs[rs] + copysize - 1; + } + + if (!mte_checks_needed(fromaddr, rdesc)) { + rdesc = 0; + } + if (!mte_checks_needed(toaddr, wdesc)) { + wdesc = 0; + } + + /* Our implementation has no particular parameter requirements for CPYM */ + + /* Do the actual memmove */ + if (forwards) { + while (copysize >= TARGET_PAGE_SIZE) { + step = copy_step(env, toaddr, fromaddr, copysize, + wmemidx, rmemidx, &wdesc, &rdesc, ra); + toaddr += step; + fromaddr += step; + copysize -= step; + env->xregs[rn] = -copysize; + if (copysize >= TARGET_PAGE_SIZE && + unlikely(cpu_loop_exit_requested(cs))) { + cpu_loop_exit_restore(cs, ra); + } + } + } else { + while (copysize >= TARGET_PAGE_SIZE) { + step = copy_step_rev(env, toaddr, fromaddr, copysize, + wmemidx, rmemidx, &wdesc, &rdesc, ra); + toaddr -= step; + fromaddr -= step; + copysize -= step; + env->xregs[rn] = copysize; + if (copysize >= TARGET_PAGE_SIZE && + unlikely(cpu_loop_exit_requested(cs))) { + cpu_loop_exit_restore(cs, ra); + } + } + } +} + +void HELPER(cpym)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc) +{ + do_cpym(env, syndrome, wdesc, rdesc, true, GETPC()); +} + +void HELPER(cpyfm)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc) +{ + do_cpym(env, syndrome, wdesc, rdesc, false, GETPC()); +} + +static void do_cpye(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t 
rdesc, uint32_t move, uintptr_t ra) +{ + /* Epilogue: do the last partial page */ + int rd = mops_destreg(syndrome); + int rs = mops_srcreg(syndrome); + int rn = mops_sizereg(syndrome); + uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX); + uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX); + bool forwards = true; + uint64_t toaddr, fromaddr, copysize, step; + + check_mops_enabled(env, ra); + + /* We choose to NOP out "no data to copy" before consistency checks */ + if (env->xregs[rn] == 0) { + return; + } + + check_mops_wrong_option(env, syndrome, ra); + + if (move) { + forwards = (int64_t)env->xregs[rn] < 0; + } + + if (forwards) { + toaddr = env->xregs[rd] + env->xregs[rn]; + fromaddr = env->xregs[rs] + env->xregs[rn]; + copysize = -env->xregs[rn]; + } else { + copysize = env->xregs[rn]; + /* This toaddr and fromaddr point to the *last* byte to copy */ + toaddr = env->xregs[rd] + copysize - 1; + fromaddr = env->xregs[rs] + copysize - 1; + } + + if (!mte_checks_needed(fromaddr, rdesc)) { + rdesc = 0; + } + if (!mte_checks_needed(toaddr, wdesc)) { + wdesc = 0; + } + + /* Check the size; we don't want to have do a check-for-interrupts */ + if (copysize >= TARGET_PAGE_SIZE) { + raise_exception_ra(env, EXCP_UDEF, syndrome, + mops_mismatch_exception_target_el(env), ra); + } + + /* Do the actual memmove */ + if (forwards) { + while (copysize > 0) { + step = copy_step(env, toaddr, fromaddr, copysize, + wmemidx, rmemidx, &wdesc, &rdesc, ra); + toaddr += step; + fromaddr += step; + copysize -= step; + env->xregs[rn] = -copysize; + } + } else { + while (copysize > 0) { + step = copy_step_rev(env, toaddr, fromaddr, copysize, + wmemidx, rmemidx, &wdesc, &rdesc, ra); + toaddr -= step; + fromaddr -= step; + copysize -= step; + env->xregs[rn] = copysize; + } + } +} + +void HELPER(cpye)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc) +{ + do_cpye(env, syndrome, wdesc, rdesc, true, GETPC()); +} + +void HELPER(cpyfe)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, + uint32_t rdesc) +{ + do_cpye(env, syndrome, wdesc, rdesc, false, GETPC()); +} diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h index 57cfd68569..575a5dab7d 100644 --- a/target/arm/tcg/helper-a64.h +++ b/target/arm/tcg/helper-a64.h @@ -117,3 +117,17 @@ DEF_HELPER_FLAGS_3(stzgm_tags, TCG_CALL_NO_WG, void, env, i64, i64) DEF_HELPER_FLAGS_4(unaligned_access, TCG_CALL_NO_WG, noreturn, env, i64, i32, i32) + +DEF_HELPER_3(setp, void, env, i32, i32) +DEF_HELPER_3(setm, void, env, i32, i32) +DEF_HELPER_3(sete, void, env, i32, i32) +DEF_HELPER_3(setgp, void, env, i32, i32) +DEF_HELPER_3(setgm, void, env, i32, i32) +DEF_HELPER_3(setge, void, env, i32, i32) + +DEF_HELPER_4(cpyp, void, env, i32, i32, i32) +DEF_HELPER_4(cpym, void, env, i32, i32, i32) +DEF_HELPER_4(cpye, void, env, i32, i32, i32) +DEF_HELPER_4(cpyfp, void, env, i32, i32, i32) +DEF_HELPER_4(cpyfm, void, env, i32, i32, i32) +DEF_HELPER_4(cpyfe, void, env, i32, i32, i32) diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index 616c5fa723..cea1adb7b6 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -306,6 +306,15 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, && !(env->pstate & PSTATE_TCO) && (sctlr & (el == 0 ? 
SCTLR_TCF0 : SCTLR_TCF))) { DP_TBFLAG_A64(flags, MTE_ACTIVE, 1); + if (!EX_TBFLAG_A64(flags, UNPRIV)) { + /* + * In non-unpriv contexts (eg EL0), unpriv load/stores + * act like normal ones; duplicate the MTE info to + * avoid translate-a64.c having to check UNPRIV to see + * whether it is OK to index into MTE_ACTIVE[]. + */ + DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1); + } } } /* And again for unprivileged accesses, if required. */ @@ -316,6 +325,18 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, && allocation_tag_access_enabled(env, 0, sctlr)) { DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1); } + /* + * For unpriv tag-setting accesses we alse need ATA0. Again, in + * contexts where unpriv and normal insns are the same we + * duplicate the ATA bit to save effort for translate-a64.c. + */ + if (EX_TBFLAG_A64(flags, UNPRIV)) { + if (allocation_tag_access_enabled(env, 0, sctlr)) { + DP_TBFLAG_A64(flags, ATA0, 1); + } + } else { + DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA)); + } /* Cache TCMA as well as TBI. */ DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx)); } diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index b23d11563a..2dd7eb3edb 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -50,14 +50,14 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) } /** - * allocation_tag_mem: + * allocation_tag_mem_probe: * @env: the cpu environment * @ptr_mmu_idx: the addressing regime to use for the virtual address * @ptr: the virtual address for which to look up tag memory * @ptr_access: the access to use for the virtual address * @ptr_size: the number of bytes in the normal memory access * @tag_access: the access to use for the tag memory - * @tag_size: the number of bytes in the tag memory access + * @probe: true to merely probe, never taking an exception * @ra: the return address for exception handling * * Our tag memory is formatted as a sequence of little-endian nibbles. @@ -66,18 +66,25 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) * for the higher addr. * * Here, resolve the physical address from the virtual address, and return - * a pointer to the corresponding tag byte. Exit with exception if the - * virtual address is not accessible for @ptr_access. - * - * The @ptr_size and @tag_size values may not have an obvious relation - * due to the alignment of @ptr, and the number of tag checks required. + * a pointer to the corresponding tag byte. * * If there is no tag storage corresponding to @ptr, return NULL. + * + * If the page is inaccessible for @ptr_access, or has a watchpoint, there are + * three options: + * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not + * accessible, and do not take watchpoint traps. The calling code must + * handle those cases in the right priority compared to MTE traps. + * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees + * that the page is going to be accessible. We will take watchpoint traps. + * (3) probe = false, ra != 0 : non-probe -- we will take both memory access + * traps and watchpoint traps. + * (probe = true, ra != 0 is invalid and will assert.) 
*/ -static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, - uint64_t ptr, MMUAccessType ptr_access, - int ptr_size, MMUAccessType tag_access, - int tag_size, uintptr_t ra) +static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx, + uint64_t ptr, MMUAccessType ptr_access, + int ptr_size, MMUAccessType tag_access, + bool probe, uintptr_t ra) { #ifdef CONFIG_USER_ONLY uint64_t clean_ptr = useronly_clean_ptr(ptr); @@ -85,6 +92,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, uint8_t *tags; uintptr_t index; + assert(!(probe && ra)); + if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) { cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access, !(flags & PAGE_VALID), ra); @@ -115,12 +124,16 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, * exception for inaccessible pages, and resolves the virtual address * into the softmmu tlb. * - * When RA == 0, this is for mte_probe. The page is expected to be - * valid. Indicate to probe_access_flags no-fault, then assert that - * we received a valid page. + * When RA == 0, this is either a pure probe or a no-fault-expected probe. + * Indicate to probe_access_flags no-fault, then either return NULL + * for the pure probe, or assert that we received a valid page for the + * no-fault-expected probe. */ flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx, ra == 0, &host, &full, ra); + if (probe && (flags & TLB_INVALID_MASK)) { + return NULL; + } assert(!(flags & TLB_INVALID_MASK)); /* If the virtual page MemAttr != Tagged, access unchecked. */ @@ -161,7 +174,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, } /* Any debug exception has priority over a tag check exception. */ - if (unlikely(flags & TLB_WATCHPOINT)) { + if (!probe && unlikely(flags & TLB_WATCHPOINT)) { int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE; assert(ra != 0); cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra); @@ -203,6 +216,15 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, #endif } +static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, + uint64_t ptr, MMUAccessType ptr_access, + int ptr_size, MMUAccessType tag_access, + uintptr_t ra) +{ + return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access, + ptr_size, tag_access, false, ra); +} + uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm) { uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16); @@ -275,7 +297,7 @@ uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt) /* Trap if accessing an invalid page. */ mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1, - MMU_DATA_LOAD, 1, GETPC()); + MMU_DATA_LOAD, GETPC()); /* Load if page supports tags. */ if (mem) { @@ -329,7 +351,7 @@ static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt, /* Trap if accessing an invalid page. */ mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE, - MMU_DATA_STORE, 1, ra); + MMU_DATA_STORE, ra); /* Store if page supports tags. */ if (mem) { @@ -372,10 +394,10 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt, if (ptr & TAG_GRANULE) { /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. 
*/ mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, - TAG_GRANULE, MMU_DATA_STORE, 1, ra); + TAG_GRANULE, MMU_DATA_STORE, ra); mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE, MMU_DATA_STORE, TAG_GRANULE, - MMU_DATA_STORE, 1, ra); + MMU_DATA_STORE, ra); /* Store if page(s) support tags. */ if (mem1) { @@ -387,7 +409,7 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt, } else { /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */ mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, - 2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra); + 2 * TAG_GRANULE, MMU_DATA_STORE, ra); if (mem1) { tag |= tag << 4; qatomic_set(mem1, tag); @@ -435,8 +457,7 @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr) /* Trap if accessing an invalid page. */ tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, - gm_bs_bytes, MMU_DATA_LOAD, - gm_bs_bytes / (2 * TAG_GRANULE), ra); + gm_bs_bytes, MMU_DATA_LOAD, ra); /* The tag is squashed to zero if the page does not support tags. */ if (!tag_mem) { @@ -495,8 +516,7 @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val) /* Trap if accessing an invalid page. */ tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, - gm_bs_bytes, MMU_DATA_LOAD, - gm_bs_bytes / (2 * TAG_GRANULE), ra); + gm_bs_bytes, MMU_DATA_LOAD, ra); /* * Tag store only happens if the page support tags, @@ -552,7 +572,7 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val) ptr &= -dcz_bytes; mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes, - MMU_DATA_STORE, tag_bytes, ra); + MMU_DATA_STORE, ra); if (mem) { int tag_pair = (val & 0xf) * 0x11; memset(mem, tag_pair, tag_bytes); @@ -597,8 +617,8 @@ static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr, } /* Record a tag check failure. */ -static void mte_check_fail(CPUARMState *env, uint32_t desc, - uint64_t dirty_ptr, uintptr_t ra) +void mte_check_fail(CPUARMState *env, uint32_t desc, + uint64_t dirty_ptr, uintptr_t ra) { int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx); @@ -715,6 +735,55 @@ static int checkN(uint8_t *mem, int odd, int cmp, int count) } /** + * checkNrev: + * @tag: tag memory to test + * @odd: true to begin testing at tags at odd nibble + * @cmp: the tag to compare against + * @count: number of tags to test + * + * Return the number of successful tests. + * Thus a return value < @count indicates a failure. + * + * This is like checkN, but it runs backwards, checking the + * tags starting with @tag and then the tags preceding it. + * This is needed by the backwards-memory-copying operations. + */ +static int checkNrev(uint8_t *mem, int odd, int cmp, int count) +{ + int n = 0, diff; + + /* Replicate the test tag and compare. */ + cmp *= 0x11; + diff = *mem-- ^ cmp; + + if (!odd) { + goto start_even; + } + + while (1) { + /* Test odd tag. */ + if (unlikely((diff) & 0xf0)) { + break; + } + if (++n == count) { + break; + } + + start_even: + /* Test even tag. 
*/ + if (unlikely((diff) & 0x0f)) { + break; + } + if (++n == count) { + break; + } + + diff = *mem-- ^ cmp; + } + return n; +} + +/** * mte_probe_int() - helper for mte_probe and mte_check * @env: CPU environment * @desc: MTEDESC descriptor @@ -732,8 +801,7 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, int mmu_idx, ptr_tag, bit55; uint64_t ptr_last, prev_page, next_page; uint64_t tag_first, tag_last; - uint64_t tag_byte_first, tag_byte_last; - uint32_t sizem1, tag_count, tag_size, n, c; + uint32_t sizem1, tag_count, n, c; uint8_t *mem1, *mem2; MMUAccessType type; @@ -763,19 +831,14 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE); tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; - /* Round the bounds to twice the tag granule, and compute the bytes. */ - tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE); - tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE); - /* Locate the page boundaries. */ prev_page = ptr & TARGET_PAGE_MASK; next_page = prev_page + TARGET_PAGE_SIZE; if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) { /* Memory access stays on one page. */ - tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1; mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1, - MMU_DATA_LOAD, tag_size, ra); + MMU_DATA_LOAD, ra); if (!mem1) { return 1; } @@ -783,14 +846,12 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr, n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count); } else { /* Memory access crosses to next page. */ - tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE); mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr, - MMU_DATA_LOAD, tag_size, ra); + MMU_DATA_LOAD, ra); - tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1; mem2 = allocation_tag_mem(env, mmu_idx, next_page, type, ptr_last - next_page + 1, - MMU_DATA_LOAD, tag_size, ra); + MMU_DATA_LOAD, ra); /* * Perform all of the comparisons. @@ -918,7 +979,7 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr) mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); (void) probe_write(env, ptr, 1, mmu_idx, ra); mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE, - dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra); + dcz_bytes, MMU_DATA_LOAD, ra); if (!mem) { goto done; } @@ -979,3 +1040,143 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr) done: return useronly_clean_ptr(ptr); } + +uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc) +{ + int mmu_idx, tag_count; + uint64_t ptr_tag, tag_first, tag_last; + void *mem; + bool w = FIELD_EX32(desc, MTEDESC, WRITE); + uint32_t n; + + mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); + /* True probe; this will never fault */ + mem = allocation_tag_mem_probe(env, mmu_idx, ptr, + w ? MMU_DATA_STORE : MMU_DATA_LOAD, + size, MMU_DATA_LOAD, true, 0); + if (!mem) { + return size; + } + + /* + * TODO: checkN() is not designed for checks of the size we expect + * for FEAT_MOPS operations, so we should implement this differently. + * Maybe we should do something like + * if (region start and size are aligned nicely) { + * do direct loads of 64 tag bits at a time; + * } else { + * call checkN() + * } + */ + /* Round the bounds to the tag granule, and compute the number of tags. 
*/ + ptr_tag = allocation_tag_from_addr(ptr); + tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE); + tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE); + tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; + n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count); + if (likely(n == tag_count)) { + return size; + } + + /* + * Failure; for the first granule, it's at @ptr. Otherwise + * it's at the first byte of the nth granule. Calculate how + * many bytes we can access without hitting that failure. + */ + if (n == 0) { + return 0; + } else { + return n * TAG_GRANULE - (ptr - tag_first); + } +} + +uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc) +{ + int mmu_idx, tag_count; + uint64_t ptr_tag, tag_first, tag_last; + void *mem; + bool w = FIELD_EX32(desc, MTEDESC, WRITE); + uint32_t n; + + mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); + /* True probe; this will never fault */ + mem = allocation_tag_mem_probe(env, mmu_idx, ptr, + w ? MMU_DATA_STORE : MMU_DATA_LOAD, + size, MMU_DATA_LOAD, true, 0); + if (!mem) { + return size; + } + + /* + * TODO: checkNrev() is not designed for checks of the size we expect + * for FEAT_MOPS operations, so we should implement this differently. + * Maybe we should do something like + * if (region start and size are aligned nicely) { + * do direct loads of 64 tag bits at a time; + * } else { + * call checkN() + * } + */ + /* Round the bounds to the tag granule, and compute the number of tags. */ + ptr_tag = allocation_tag_from_addr(ptr); + tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE); + tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE); + tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1; + n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count); + if (likely(n == tag_count)) { + return size; + } + + /* + * Failure; for the first granule, it's at @ptr. Otherwise + * it's at the last byte of the nth granule. Calculate how + * many bytes we can access without hitting that failure. + */ + if (n == 0) { + return 0; + } else { + return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last); + } +} + +void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc) +{ + int mmu_idx, tag_count; + uint64_t ptr_tag; + void *mem; + + if (!desc) { + /* Tags not actually enabled */ + return; + } + + mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX); + /* True probe: this will never fault */ + mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size, + MMU_DATA_STORE, true, 0); + if (!mem) { + return; + } + + /* + * We know that ptr and size are both TAG_GRANULE aligned; store + * the tag from the pointer value into the tag memory. + */ + ptr_tag = allocation_tag_from_addr(ptr); + tag_count = size / TAG_GRANULE; + if (ptr & TAG_GRANULE) { + /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */ + store_tag1_parallel(TAG_GRANULE, mem, ptr_tag); + mem++; + tag_count--; + } + memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2); + if (tag_count & 1) { + /* Final trailing unaligned nibble */ + mem += tag_count / 2; + store_tag1_parallel(0, mem, ptr_tag); + } +} diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 1b6fbb61e2..97f25b4451 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -105,9 +105,17 @@ void a64_translate_init(void) } /* - * Return the core mmu_idx to use for A64 "unprivileged load/store" insns + * Return the core mmu_idx to use for A64 load/store insns which + * have a "unprivileged load/store" variant. 
Those insns access + * EL0 if executed from an EL which has control over EL0 (usually + * EL1) but behave like normal loads and stores if executed from + * elsewhere (eg EL3). + * + * @unpriv : true for the unprivileged encoding; false for the + * normal encoding (in which case we will return the same + * thing as get_mem_index(). */ -static int get_a64_user_mem_index(DisasContext *s) +static int get_a64_user_mem_index(DisasContext *s, bool unpriv) { /* * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL, @@ -115,7 +123,7 @@ static int get_a64_user_mem_index(DisasContext *s) */ ARMMMUIdx useridx = s->mmu_idx; - if (s->unpriv) { + if (unpriv && s->unpriv) { /* * We have pre-computed the condition for AccType_UNPRIV. * Therefore we should never get here with a mmu_idx for @@ -1453,6 +1461,10 @@ static bool trans_TBZ(DisasContext *s, arg_tbz *a) static bool trans_B_cond(DisasContext *s, arg_B_cond *a) { + /* BC.cond is only present with FEAT_HBC */ + if (a->c && !dc_isar_feature(aa64_hbc, s)) { + return false; + } reset_btype(s); if (a->cond < 0x0e) { /* genuinely conditional branches */ @@ -2260,7 +2272,7 @@ static void handle_sys(DisasContext *s, bool isread, clean_addr = clean_data_tbi(s, tcg_rt); gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8); - if (s->ata) { + if (s->ata[0]) { /* Extract the tag from the register to match STZGM. */ tag = tcg_temp_new_i64(); tcg_gen_shri_i64(tag, tcg_rt, 56); @@ -2277,7 +2289,7 @@ static void handle_sys(DisasContext *s, bool isread, clean_addr = clean_data_tbi(s, tcg_rt); gen_helper_dc_zva(cpu_env, clean_addr); - if (s->ata) { + if (s->ata[0]) { /* Extract the tag from the register to match STZGM. */ tag = tcg_temp_new_i64(); tcg_gen_shri_i64(tag, tcg_rt, 56); @@ -3058,7 +3070,7 @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a) tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop); /* Perform the tag store, if tag access enabled. */ - if (s->ata) { + if (s->ata[0]) { if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr); } else { @@ -3084,7 +3096,7 @@ static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a, if (!a->p) { tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset); } - memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); + memidx = get_a64_user_mem_index(s, a->unpriv); *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store, a->w || a->rn != 31, mop, a->unpriv, memidx); @@ -3105,7 +3117,7 @@ static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a) { bool iss_sf, iss_valid = !a->w; TCGv_i64 clean_addr, dirty_addr, tcg_rt; - int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s); + int memidx = get_a64_user_mem_index(s, a->unpriv); MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop); @@ -3123,7 +3135,7 @@ static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a) { bool iss_sf, iss_valid = !a->w; TCGv_i64 clean_addr, dirty_addr, tcg_rt; - int memidx = a->unpriv ? 
get_a64_user_mem_index(s) : get_mem_index(s); + int memidx = get_a64_user_mem_index(s, a->unpriv); MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop); @@ -3756,7 +3768,7 @@ static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a) tcg_gen_addi_i64(addr, addr, a->imm); tcg_rt = cpu_reg(s, a->rt); - if (s->ata) { + if (s->ata[0]) { gen_helper_stzgm_tags(cpu_env, addr, tcg_rt); } /* @@ -3788,7 +3800,7 @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a) tcg_gen_addi_i64(addr, addr, a->imm); tcg_rt = cpu_reg(s, a->rt); - if (s->ata) { + if (s->ata[0]) { gen_helper_stgm(cpu_env, addr, tcg_rt); } else { MMUAccessType acc = MMU_DATA_STORE; @@ -3820,7 +3832,7 @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a) tcg_gen_addi_i64(addr, addr, a->imm); tcg_rt = cpu_reg(s, a->rt); - if (s->ata) { + if (s->ata[0]) { gen_helper_ldgm(tcg_rt, cpu_env, addr); } else { MMUAccessType acc = MMU_DATA_LOAD; @@ -3855,7 +3867,7 @@ static bool trans_LDG(DisasContext *s, arg_ldst_tag *a) tcg_gen_andi_i64(addr, addr, -TAG_GRANULE); tcg_rt = cpu_reg(s, a->rt); - if (s->ata) { + if (s->ata[0]) { gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt); } else { /* @@ -3892,7 +3904,7 @@ static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair) tcg_gen_addi_i64(addr, addr, a->imm); } tcg_rt = cpu_reg_sp(s, a->rt); - if (!s->ata) { + if (!s->ata[0]) { /* * For STG and ST2G, we need to check alignment and probe memory. * TODO: For STZG and STZ2G, we could rely on the stores below, @@ -3950,6 +3962,123 @@ TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false) TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true) TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true) +typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32); + +static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, + bool is_setg, SetFn fn) +{ + int memidx; + uint32_t syndrome, desc = 0; + + if (is_setg && !dc_isar_feature(aa64_mte, s)) { + return false; + } + + /* + * UNPREDICTABLE cases: we choose to UNDEF, which allows + * us to pull this check before the CheckMOPSEnabled() test + * (which we do in the helper function) + */ + if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd || + a->rd == 31 || a->rn == 31) { + return false; + } + + memidx = get_a64_user_mem_index(s, a->unpriv); + + /* + * We pass option_a == true, matching our implementation; + * we pass wrong_option == false: helper function may set that bit. + */ + syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv, + is_epilogue, false, true, a->rd, a->rs, a->rn); + + if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) { + /* We may need to do MTE tag checking, so assemble the descriptor */ + desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); + desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); + desc = FIELD_DP32(desc, MTEDESC, WRITE, true); + /* SIZEM1 and ALIGN we leave 0 (byte write) */ + } + /* The helper function always needs the memidx even with MTE disabled */ + desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx); + + /* + * The helper needs the register numbers, but since they're in + * the syndrome anyway, we let it extract them from there rather + * than passing in an extra three integer arguments. 
+ */ + fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc)); + return true; +} + +TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp) +TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm) +TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete) +TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp) +TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm) +TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge) + +typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32); + +static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn) +{ + int rmemidx, wmemidx; + uint32_t syndrome, rdesc = 0, wdesc = 0; + bool wunpriv = extract32(a->options, 0, 1); + bool runpriv = extract32(a->options, 1, 1); + + /* + * UNPREDICTABLE cases: we choose to UNDEF, which allows + * us to pull this check before the CheckMOPSEnabled() test + * (which we do in the helper function) + */ + if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd || + a->rd == 31 || a->rs == 31 || a->rn == 31) { + return false; + } + + rmemidx = get_a64_user_mem_index(s, runpriv); + wmemidx = get_a64_user_mem_index(s, wunpriv); + + /* + * We pass option_a == true, matching our implementation; + * we pass wrong_option == false: helper function may set that bit. + */ + syndrome = syn_mop(false, false, a->options, is_epilogue, + false, true, a->rd, a->rs, a->rn); + + /* If we need to do MTE tag checking, assemble the descriptors */ + if (s->mte_active[runpriv]) { + rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid); + rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma); + } + if (s->mte_active[wunpriv]) { + wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid); + wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma); + wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true); + } + /* The helper function needs these parts of the descriptor regardless */ + rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx); + wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx); + + /* + * The helper needs the register numbers, but since they're in + * the syndrome anyway, we let it extract them from there rather + * than passing in an extra three integer arguments. 
+ */ + fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc), + tcg_constant_i32(rdesc)); + return true; +} + +TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp) +TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym) +TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye) +TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp) +TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm) +TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe) + typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64); static bool gen_rri(DisasContext *s, arg_rri_sf *a, @@ -4012,7 +4141,7 @@ static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a, tcg_rn = cpu_reg_sp(s, a->rn); tcg_rd = cpu_reg_sp(s, a->rd); - if (s->ata) { + if (s->ata[0]) { gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, tcg_constant_i32(imm), tcg_constant_i32(a->uimm4)); @@ -5399,7 +5528,7 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { goto do_unallocated; } - if (s->ata) { + if (s->ata[0]) { gen_helper_irg(cpu_reg_sp(s, rd), cpu_env, cpu_reg_sp(s, rn), cpu_reg(s, rm)); } else { @@ -13890,7 +14019,8 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->bt = EX_TBFLAG_A64(tb_flags, BT); dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE); dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV); - dc->ata = EX_TBFLAG_A64(tb_flags, ATA); + dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA); + dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0); dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE); dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index f748ba6f39..63922f8bad 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -114,8 +114,8 @@ typedef struct DisasContext { bool unpriv; /* True if v8.3-PAuth is active. */ bool pauth_active; - /* True if v8.5-MTE access to tags is enabled. */ - bool ata; + /* True if v8.5-MTE access to tags is enabled; index with is_unpriv. */ + bool ata[2]; /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */ bool mte_active[2]; /* True with v8.5-BTI and SCTLR_ELx.BT* set. */ diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c index 239f6e44e9..80cd8d70db 100644 --- a/target/m68k/m68k-semi.c +++ b/target/m68k/m68k-semi.c @@ -15,6 +15,10 @@ * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * The semihosting protocol implemented here is described in the + * libgloss sources: + * https://sourceware.org/git/?p=newlib-cygwin.git;a=blob;f=libgloss/m68k/m68k-semi.txt;hb=HEAD */ #include "qemu/osdep.h" |
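A few illustrative notes on the MTE/MOPS hunks above. The new checkNrev() walks packed allocation tags backwards: two 4-bit tags per byte, the even-numbered granule in the low nibble and the odd-numbered granule in the high nibble, starting at the nibble selected by @odd and stepping toward lower addresses until a tag differs from @cmp. The following is a minimal, self-contained sketch of that traversal order only; it is not QEMU code, and the function name, flat tag array and demo values are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Count matching tags walking backwards, two 4-bit tags per byte. */
static int check_rev_sketch(const uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0;

    while (n < count) {
        int tag = odd ? (*mem >> 4) & 0xf : *mem & 0xf;

        if (tag != cmp) {
            break;                /* first failing granule found */
        }
        n++;
        if (odd) {
            odd = 0;              /* even nibble of the same tag byte */
        } else {
            odd = 1;
            mem--;                /* odd nibble of the preceding byte */
        }
    }
    return n;
}

int main(void)
{
    /* granule g lives in byte g/2; odd granules use the high nibble */
    uint8_t tags[4] = { 0x11, 0x31, 0x11, 0x11 };   /* granule 3 has tag 3 */

    /* start at granule 7 (odd nibble of tags[3]), compare against tag 1 */
    printf("%d\n", check_rev_sketch(&tags[3], 1, 0x1, 8));   /* prints 4 */
    return 0;
}

A return value smaller than the requested count marks the first granule whose tag does not match, which is exactly what the backwards-copying CPY* helpers need.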
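mte_mops_probe() converts "the first n granules matched" into a byte count the caller can safely process in the forward direction. A minimal sketch of just that arithmetic, assuming TAG_GRANULE is 16; the function name and the example pointer/size values are illustrative, not QEMU API.

#include <stdio.h>
#include <inttypes.h>

#define TAG_GRANULE 16

/* Bytes a forward MOPS access starting at ptr may touch before the
 * first failing tag granule, given n_ok of tag_count granules matched. */
static uint64_t safe_bytes_fwd(uint64_t ptr, uint64_t size,
                               unsigned n_ok, unsigned tag_count)
{
    uint64_t tag_first = ptr & ~(uint64_t)(TAG_GRANULE - 1);

    if (n_ok == tag_count) {
        return size;              /* every granule matched */
    }
    if (n_ok == 0) {
        return 0;                 /* the very first granule already fails */
    }
    /* n_ok whole granules, minus the offset of ptr into the first one */
    return n_ok * TAG_GRANULE - (ptr - tag_first);
}

int main(void)
{
    /* ptr is 4 bytes into its granule; 3 of 7 granules match */
    printf("%" PRIu64 "\n", safe_bytes_fwd(0x1004, 100, 3, 7));   /* 44 */
    return 0;
}

The helper's return value is what lets a SET*/CPY* step do the largest tag-clean prefix of the operation before the tag check failure is reported.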
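mte_mops_probe_rev() does the mirror-image calculation for backwards copies, where @ptr is the highest address and granules are counted downwards. Again a minimal sketch of the arithmetic only, with illustrative names and values.

#include <stdio.h>
#include <inttypes.h>

#define TAG_GRANULE 16

/* Bytes a backwards MOPS access ending at ptr may touch before the
 * first failing tag granule, counting granules from ptr downwards. */
static uint64_t safe_bytes_rev(uint64_t ptr, uint64_t size,
                               unsigned n_ok, unsigned tag_count)
{
    uint64_t tag_last = ptr & ~(uint64_t)(TAG_GRANULE - 1);

    if (n_ok == tag_count) {
        return size;
    }
    if (n_ok == 0) {
        return 0;
    }
    /* n_ok - 1 whole granules below, plus the partial granule holding ptr */
    return (n_ok - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
}

int main(void)
{
    /* ptr is 11 bytes into its granule; 2 of 7 granules match */
    printf("%" PRIu64 "\n", safe_bytes_rev(0x102b, 100, 2, 7));   /* 28 */
    return 0;
}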
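Finally, mte_mops_set_tags() writes the pointer's tag into packed tag storage: a possible leading nibble that shares its byte with the previous granule, a run of whole bytes filled with memset(), and a possible trailing nibble. The sketch below shows that packing pattern in isolation; the flat tagmem array, granule indexing by array position, and the non-atomic nibble updates are simplifications for illustration (the real code uses the store_tag1*/parallel helpers).

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Write 'tag' into packed tag storage for granules
 * [first_granule, first_granule + granule_count). */
static void set_tags_sketch(uint8_t *tagmem, unsigned first_granule,
                            unsigned granule_count, uint8_t tag)
{
    uint8_t *p = tagmem + first_granule / 2;

    if (first_granule & 1) {
        /* leading granule shares its byte with the previous granule */
        *p = (*p & 0x0f) | (uint8_t)(tag << 4);
        p++;
        granule_count--;
    }
    memset(p, tag | (tag << 4), granule_count / 2);
    if (granule_count & 1) {
        /* trailing granule shares its byte with the following granule */
        p += granule_count / 2;
        *p = (*p & 0xf0) | tag;
    }
}

int main(void)
{
    uint8_t tagmem[8] = { 0 };

    set_tags_sketch(tagmem, 3, 6, 0x5);       /* tag granules 3..8 */
    for (int i = 0; i < 8; i++) {
        printf("%02x ", tagmem[i]);           /* 00 50 55 55 05 00 00 00 */
    }
    printf("\n");
    return 0;
}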