author | Richard Henderson <richard.henderson@linaro.org> | 2020-06-25 20:31:35 -0700
committer | Peter Maydell <peter.maydell@linaro.org> | 2020-06-26 14:31:12 +0100
commit | d28d12f008ee44dc2cc2ee5d8f673be9febc951e (patch)
tree | 7997410653269508154f6fd8a4174f9730e2ffac /target/arm/sve_helper.c
parent | 9473d0ecafcffc8b258892b1f9f18e037bdba958 (diff)
target/arm: Add mte helpers for sve scatter/gather memory ops
Because the elements are non-sequential, we cannot eliminate many
tests straight away like we can for sequential operations. But
we often have the PTE details handy, so we can test for Tagged.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-38-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
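The per-element pattern the commit message describes can be sketched in isolation: each active element of a gather is checked individually, but only when MTE checking was requested (mtedesc nonzero) and the translated page is Tagged. The sketch below is illustrative only; gather_load_sketch, page_info and tag_check_fn are hypothetical stand-ins for QEMU's TLB-attribute lookup (arm_tlb_mte_tagged()) and tag check (mte_check1()), not the actual API.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the per-page attributes returned by the TLB
 * lookup; "tagged" mirrors what arm_tlb_mte_tagged() reports. */
typedef struct {
    bool tagged;
} page_info;

/* Hypothetical stand-in for mte_check1(): verifies the allocation tag for
 * one element's address, raising a tag-check fault on mismatch. */
typedef void (*tag_check_fn)(uint64_t addr);

/*
 * Gather-load sketch: element addresses are non-sequential, so the tag
 * check cannot be hoisted out of the loop as it can for contiguous
 * accesses; instead it runs per element, gated on the page being Tagged.
 */
void gather_load_sketch(uint64_t base, const int64_t *offsets,
                        const bool *pred, size_t nelem,
                        uint32_t mtedesc, const page_info *pages,
                        tag_check_fn check_tag)
{
    for (size_t i = 0; i < nelem; i++) {
        if (!pred[i]) {
            continue;            /* inactive element: no access, no check */
        }
        uint64_t addr = base + offsets[i];
        if (mtedesc && pages[i].tagged) {
            check_tag(addr);     /* per-element tag check */
        }
        /* ... load the element from addr into the destination vector ... */
    }
}
```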
Diffstat (limited to 'target/arm/sve_helper.c')
-rw-r--r-- | target/arm/sve_helper.c | 183
1 file changed, 152 insertions, 31 deletions
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 7aca4ad384..ad974c2cc5 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -5354,7 +5354,8 @@ static target_ulong off_zd_d(void *reg, intptr_t reg_ofs)
 static inline QEMU_ALWAYS_INLINE
 void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                target_ulong base, uint32_t desc, uintptr_t retaddr,
-               int esize, int msize, zreg_off_fn *off_fn,
+               uint32_t mtedesc, int esize, int msize,
+               zreg_off_fn *off_fn,
                sve_ldst1_host_fn *host_fn,
                sve_ldst1_tlb_fn *tlb_fn)
 {
@@ -5382,7 +5383,9 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                     cpu_check_watchpoint(env_cpu(env), addr, msize,
                                          info.attrs, BP_MEM_READ, retaddr);
                 }
-                /* TODO: MTE check */
+                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                    mte_check1(env, mtedesc, addr, retaddr);
+                }
                 host_fn(&scratch, reg_off, info.host);
             } else {
                 /* Element crosses the page boundary. */
@@ -5393,7 +5396,9 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                            msize, info.attrs,
                                            BP_MEM_READ, retaddr);
                 }
-                /* TODO: MTE check */
+                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                    mte_check1(env, mtedesc, addr, retaddr);
+                }
                 tlb_fn(env, &scratch, reg_off, addr, retaddr);
             }
         }
@@ -5406,20 +5411,53 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
     memcpy(vd, &scratch, reg_max);
 }
 
+static inline QEMU_ALWAYS_INLINE
+void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+                   target_ulong base, uint32_t desc, uintptr_t retaddr,
+                   int esize, int msize, zreg_off_fn *off_fn,
+                   sve_ldst1_host_fn *host_fn,
+                   sve_ldst1_tlb_fn *tlb_fn)
+{
+    uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+    /* Remove mtedesc from the normal sve descriptor. */
+    desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+    /*
+     * ??? TODO: For the 32-bit offset extractions, base + ofs cannot
+     * offset base entirely over the address space hole to change the
+     * pointer tag, or change the bit55 selector.  So we could here
+     * examine TBI + TCMA like we do for sve_ldN_r_mte().
+     */
+    sve_ld1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
+              esize, msize, off_fn, host_fn, tlb_fn);
+}
+
 #define DO_LD1_ZPZ_S(MEM, OFS, MSZ) \
 void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
                                  void *vm, target_ulong base, uint32_t desc) \
 { \
-    sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
+    sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \
               off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+                                       void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
+                  off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
 }
 
 #define DO_LD1_ZPZ_D(MEM, OFS, MSZ) \
 void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
                                  void *vm, target_ulong base, uint32_t desc) \
 { \
-    sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
+    sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \
               off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+                                       void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
+                  off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
 }
 
 DO_LD1_ZPZ_S(bsu, zsu, MO_8)
@@ -5498,7 +5536,8 @@ DO_LD1_ZPZ_D(dd_be, zd, MO_64)
 static inline QEMU_ALWAYS_INLINE
 void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                  target_ulong base, uint32_t desc, uintptr_t retaddr,
-                 const int esz, const int msz, zreg_off_fn *off_fn,
+                 uint32_t mtedesc, const int esz, const int msz,
+                 zreg_off_fn *off_fn,
                  sve_ldst1_host_fn *host_fn,
                  sve_ldst1_tlb_fn *tlb_fn)
 {
@@ -5523,6 +5562,9 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
      * Probe the first element, allowing faults.
      */
     addr = base + (off_fn(vm, reg_off) << scale);
+    if (mtedesc) {
+        mte_check1(env, mtedesc, addr, retaddr);
+    }
     tlb_fn(env, vd, reg_off, addr, retaddr);
 
     /* After any fault, zero the other elements. */
@@ -5555,7 +5597,11 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                  (env_cpu(env), addr, msize) & BP_MEM_READ)) {
                 goto fault;
             }
-            /* TODO: MTE check. */
+            if (mtedesc &&
+                arm_tlb_mte_tagged(&info.attrs) &&
+                !mte_probe1(env, mtedesc, addr)) {
+                goto fault;
+            }
 
             host_fn(vd, reg_off, info.host);
         }
@@ -5568,20 +5614,58 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
     record_fault(env, reg_off, reg_max);
 }
 
-#define DO_LDFF1_ZPZ_S(MEM, OFS, MSZ) \
-void HELPER(sve_ldff##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
-                                   void *vm, target_ulong base, uint32_t desc) \
-{ \
-    sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), MO_32, MSZ, \
-                off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+static inline QEMU_ALWAYS_INLINE
+void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+                     target_ulong base, uint32_t desc, uintptr_t retaddr,
+                     const int esz, const int msz,
+                     zreg_off_fn *off_fn,
+                     sve_ldst1_host_fn *host_fn,
+                     sve_ldst1_tlb_fn *tlb_fn)
+{
+    uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+    /* Remove mtedesc from the normal sve descriptor. */
+    desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+    /*
+     * ??? TODO: For the 32-bit offset extractions, base + ofs cannot
+     * offset base entirely over the address space hole to change the
+     * pointer tag, or change the bit55 selector.  So we could here
+     * examine TBI + TCMA like we do for sve_ldN_r_mte().
+     */
+    sve_ldff1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
+                esz, msz, off_fn, host_fn, tlb_fn);
 }
 
-#define DO_LDFF1_ZPZ_D(MEM, OFS, MSZ) \
-void HELPER(sve_ldff##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
-                                   void *vm, target_ulong base, uint32_t desc) \
-{ \
-    sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), MO_64, MSZ, \
-                off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+#define DO_LDFF1_ZPZ_S(MEM, OFS, MSZ) \
+void HELPER(sve_ldff##MEM##_##OFS) \
+    (CPUARMState *env, void *vd, void *vg, \
+     void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_32, MSZ, \
+                off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ldff##MEM##_##OFS##_mte) \
+    (CPUARMState *env, void *vd, void *vg, \
+     void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_32, MSZ, \
+                    off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+}
+
+#define DO_LDFF1_ZPZ_D(MEM, OFS, MSZ) \
+void HELPER(sve_ldff##MEM##_##OFS) \
+    (CPUARMState *env, void *vd, void *vg, \
+     void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_64, MSZ, \
+                off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ldff##MEM##_##OFS##_mte) \
+    (CPUARMState *env, void *vd, void *vg, \
+     void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_64, MSZ, \
+                    off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
 }
 
 DO_LDFF1_ZPZ_S(bsu, zsu, MO_8)
@@ -5653,7 +5737,8 @@ DO_LDFF1_ZPZ_D(dd_be, zd, MO_64)
 static inline QEMU_ALWAYS_INLINE
 void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                target_ulong base, uint32_t desc, uintptr_t retaddr,
-               int esize, int msize, zreg_off_fn *off_fn,
+               uint32_t mtedesc, int esize, int msize,
+               zreg_off_fn *off_fn,
                sve_ldst1_host_fn *host_fn,
                sve_ldst1_tlb_fn *tlb_fn)
 {
@@ -5697,7 +5782,10 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 cpu_check_watchpoint(env_cpu(env), addr, msize,
                                      info.attrs, BP_MEM_WRITE, retaddr);
             }
-            /* TODO: MTE check. */
+
+            if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                mte_check1(env, mtedesc, addr, retaddr);
+            }
         }
         i += 1;
         reg_off += esize;
@@ -5727,20 +5815,53 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
     } while (reg_off < reg_max);
 }
 
-#define DO_ST1_ZPZ_S(MEM, OFS, MSZ) \
-void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
+static inline QEMU_ALWAYS_INLINE
+void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+                   target_ulong base, uint32_t desc, uintptr_t retaddr,
+                   int esize, int msize, zreg_off_fn *off_fn,
+                   sve_ldst1_host_fn *host_fn,
+                   sve_ldst1_tlb_fn *tlb_fn)
+{
+    uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+    /* Remove mtedesc from the normal sve descriptor. */
+    desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+    /*
+     * ??? TODO: For the 32-bit offset extractions, base + ofs cannot
+     * offset base entirely over the address space hole to change the
+     * pointer tag, or change the bit55 selector.  So we could here
+     * examine TBI + TCMA like we do for sve_ldN_r_mte().
+     */
+    sve_st1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
+              esize, msize, off_fn, host_fn, tlb_fn);
+}
+
+#define DO_ST1_ZPZ_S(MEM, OFS, MSZ) \
+void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
                                  void *vm, target_ulong base, uint32_t desc) \
-{ \
-    sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
-              off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+{ \
+    sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \
+              off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+} \
+void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+                                       void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
+                  off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
 }
 
-#define DO_ST1_ZPZ_D(MEM, OFS, MSZ) \
-void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
+#define DO_ST1_ZPZ_D(MEM, OFS, MSZ) \
+void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
                                  void *vm, target_ulong base, uint32_t desc) \
-{ \
-    sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
-              off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+{ \
+    sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \
+              off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+} \
+void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+                                       void *vm, target_ulong base, uint32_t desc) \
+{ \
+    sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
+                  off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
 }
 
 DO_ST1_ZPZ_S(bs, zsu, MO_8)
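The new *_mte wrappers above rely on the translator packing the MTE descriptor into the bits of desc above the plain SVE fields; the wrapper then recovers it with a shift and strips it with extract32(). Below is a minimal sketch of that round trip, assuming illustrative shift values rather than QEMU's real SIMD_DATA_SHIFT and SVE_MTEDESC_SHIFT, and a local re-implementation of the extract32() helper.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real values come from QEMU's
 * SIMD_DATA_SHIFT and SVE_MTEDESC_SHIFT definitions. */
#define SIMD_DATA_SHIFT_SKETCH   10
#define SVE_MTEDESC_SHIFT_SKETCH  5
#define SPLIT (SIMD_DATA_SHIFT_SKETCH + SVE_MTEDESC_SHIFT_SKETCH)

/* Local stand-in for QEMU's extract32(value, start, length). */
static uint32_t extract32_sketch(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    uint32_t sve_desc = 0x1234 & ((1u << SPLIT) - 1); /* low bits: normal SVE descriptor */
    uint32_t mtedesc  = 0x2b;                         /* high bits: MTE descriptor */

    /* Pack: translate-time code places mtedesc above the SVE fields. */
    uint32_t desc = sve_desc | (mtedesc << SPLIT);

    /* Unpack: the helper peels mtedesc off and restores the plain descriptor,
     * mirroring the shift + extract32() pair in sve_ld1_z_mte() and friends. */
    uint32_t mtedesc_out = desc >> SPLIT;
    uint32_t sve_out     = extract32_sketch(desc, 0, SPLIT);

    assert(mtedesc_out == mtedesc && sve_out == sve_desc);
    printf("desc=0x%x mtedesc=0x%x sve=0x%x\n",
           (unsigned)desc, (unsigned)mtedesc_out, (unsigned)sve_out);
    return 0;
}
```

Passing mtedesc == 0, as the non-MTE helpers in the patch do via the extra 0 argument, then disables every tag check inside the shared sve_ld1_z()/sve_ldff1_z()/sve_st1_z() bodies.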