author     Richard Henderson <richard.henderson@linaro.org>  2022-07-11 22:02:09 +0530
committer  Richard Henderson <richard.henderson@linaro.org>  2022-07-11 22:02:09 +0530
commit     9fed1bca6bc643ce91b6117f4974421aaede4751 (patch)
tree       41ac4e6c9e822fb36a4be1aad6b5855bf0237eb7
parent     63b38f6c85acd312c2cab68554abf33adf4ee2b3 (diff)
parent     f9982ceaf26df27d15547a3a7990a95019e9e3a8 (diff)
Merge tag 'pull-target-arm-20220711' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm:
 * Implement SME emulation, for both system and linux-user

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmLMLC8ZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3sR6D/wN7+rQ86FnPEpeqqh37Chx
# dyCwmAjad7cSJWY4d6RlhNBll35D2nPIdo7MQrUi7ViNL+mDRGr4xSeYOU3IBXd7
# hxRY2rAjwlVtpL6WWJkHeZTr7n+gHtds/Pda+f7d8E7RmsXtD4uRSnbGG82HVGy0
# suG6MJwDYncSoa4AlX/J6sBBYha0cusTguTbkGkEfRonKgvQ6PAogYU8zAmI4EEz
# BsdhXOjH3FGO6aUUVjZKJd1CuISNYe32sZY1OcfuiB1KbBQaRHt2LbxfKSQGMKwd
# 0GzXZK9NJ0xIteAX/ESHVakudennqaBY7kdbBdLwb485pQ25r7P2jGyPM4J4fota
# fXScOQKGGVfNySPwTOPR0QLzshcckWw7+Y+AOBg/rpbepSNT41bwI6Ldjs8oNtcI
# O44wNgbN4uvbVul4dj0rOrc5azMvfH0cU+SSrZAInahEHpCN9o/NQz1D2fju7j9W
# MujJCzRVBQSCvUwD1jX6+YDTJU3y84HeGYB/whXt8tg67Ump/5IgUInACgQzwwEZ
# ZP+vxFmhuMlIkbfaiDgYdx2CLjTVSJ7YHjGx/rkqlpLVx6DgGA0klzgHV4L8fbWh
# RFY8fejQve5QzsdiiDdeRWigFY2LnDEnphwOAy7tzktRpai8FnK3aMZyIj/5WODL
# uu+gSmYoToLhC35Uan5otg==
# =D1/T
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 11 Jul 2022 07:27:03 PM +0530
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]

* tag 'pull-target-arm-20220711' of https://git.linaro.org/people/pmaydell/qemu-arm: (45 commits)
  linux-user/aarch64: Add SME related hwcap entries
  target/arm: Enable SME for user-only
  target/arm: Only set ZEN in reset if SVE present
  linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL
  linux-user: Rename sve prctls
  linux-user/aarch64: Implement SME signal handling
  linux-user/aarch64: Move sve record checks into restore
  linux-user/aarch64: Verify extra record lock succeeded
  linux-user/aarch64: Do not allow duplicate or short sve records
  linux-user/aarch64: Tidy target_restore_sigframe error return
  linux-user/aarch64: Add SM bit to SVE signal context
  linux-user/aarch64: Reset PSTATE.SM on syscalls
  linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS
  target/arm: Enable SME for -cpu max
  target/arm: Reset streaming sve state on exception boundaries
  target/arm: Implement SCLAMP, UCLAMP
  target/arm: Implement REVD
  target/arm: Implement PSEL
  target/arm: Implement SME integer outer product
  target/arm: Implement FMOPA, FMOPS (widening)
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  docs/system/arm/emulation.rst       |    4
-rw-r--r--  linux-user/aarch64/cpu_loop.c       |    9
-rw-r--r--  linux-user/aarch64/signal.c         |  241
-rw-r--r--  linux-user/aarch64/target_cpu.h     |    5
-rw-r--r--  linux-user/aarch64/target_prctl.h   |   62
-rw-r--r--  linux-user/elfload.c                |   20
-rw-r--r--  linux-user/syscall.c                |   28
-rw-r--r--  target/arm/cpu.c                    |   35
-rw-r--r--  target/arm/cpu.h                    |    7
-rw-r--r--  target/arm/cpu64.c                  |   11
-rw-r--r--  target/arm/helper-sme.h             |  126
-rw-r--r--  target/arm/helper-sve.h             |    4
-rw-r--r--  target/arm/helper.c                 |   56
-rw-r--r--  target/arm/helper.h                 |   18
-rw-r--r--  target/arm/meson.build              |    3
-rw-r--r--  target/arm/sme-fa64.decode          |   60
-rw-r--r--  target/arm/sme.decode               |   88
-rw-r--r--  target/arm/sme_helper.c             | 1140
-rw-r--r--  target/arm/sve.decode               |   41
-rw-r--r--  target/arm/sve_helper.c             |   28
-rw-r--r--  target/arm/translate-a64.c          |  103
-rw-r--r--  target/arm/translate-a64.h          |   45
-rw-r--r--  target/arm/translate-sme.c          |  373
-rw-r--r--  target/arm/translate-sve.c          |  393
-rw-r--r--  target/arm/translate-vfp.c          |   12
-rw-r--r--  target/arm/translate.c              |    2
-rw-r--r--  target/arm/translate.h              |   16
-rw-r--r--  target/arm/vec_helper.c             |   24
28 files changed, 2820 insertions(+), 134 deletions(-)
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 83b4410065..8e494c8bea 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -65,6 +65,10 @@ the following architecture extensions:
- FEAT_SHA512 (Advanced SIMD SHA512 instructions)
- FEAT_SM3 (Advanced SIMD SM3 instructions)
- FEAT_SM4 (Advanced SIMD SM4 instructions)
+- FEAT_SME (Scalable Matrix Extension)
+- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode)
+- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
+- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
- FEAT_SPECRES (Speculation restriction instructions)
- FEAT_SSBS (Speculative Store Bypass Safe)
- FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index f7ef36cd9f..9875d609a9 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -89,6 +89,15 @@ void cpu_loop(CPUARMState *env)
switch (trapnr) {
case EXCP_SWI:
+ /*
+ * On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
+ * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
+ */
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0);
+ arm_rebuild_hflags(env);
+ arm_reset_sve_state(env);
+ }
ret = do_syscall(env,
env->xregs[8],
env->xregs[0],
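
The rule encoded above -- clear PSTATE.SM on syscall entry, keep PSTATE.ZA and the ZA storage -- can be modelled with plain bit operations. A minimal standalone sketch, assuming only the architectural SVCR bit positions (SM is bit 0, ZA is bit 1); the function name is hypothetical, not QEMU code:

#include <stdint.h>
#include <stdio.h>

#define SVCR_SM (1ULL << 0)   /* PSTATE.SM: streaming SVE mode */
#define SVCR_ZA (1ULL << 1)   /* PSTATE.ZA: ZA storage enabled */

/* Model of syscall entry: SM is cleared (as SMSTOP SM would), ZA and
 * the ZA matrix are untouched; SVE state needs resetting only when SM
 * was previously set. */
static uint64_t syscall_entry_svcr(uint64_t svcr, int *reset_sve)
{
    *reset_sve = (svcr & SVCR_SM) != 0;
    return svcr & ~SVCR_SM;
}

int main(void)
{
    int reset;
    uint64_t svcr = syscall_entry_svcr(SVCR_SM | SVCR_ZA, &reset);
    printf("svcr=0x%llx reset_sve=%d\n", (unsigned long long)svcr, reset);
    return 0;   /* prints svcr=0x2 reset_sve=1 */
}
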
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 7da0e36c6d..6a2c6e06d2 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -78,7 +78,8 @@ struct target_extra_context {
struct target_sve_context {
struct target_aarch64_ctx head;
uint16_t vl;
- uint16_t reserved[3];
+ uint16_t flags;
+ uint16_t reserved[2];
/* The actual SVE data immediately follows. It is laid out
* according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
* the original struct pointer.
@@ -101,6 +102,24 @@ struct target_sve_context {
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
(TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
+#define TARGET_SVE_SIG_FLAG_SM 1
+
+#define TARGET_ZA_MAGIC 0x54366345
+
+struct target_za_context {
+ struct target_aarch64_ctx head;
+ uint16_t vl;
+ uint16_t reserved[3];
+ /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+ QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+ (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -173,13 +192,17 @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
}
static void target_setup_sve_record(struct target_sve_context *sve,
- CPUARMState *env, int vq, int size)
+ CPUARMState *env, int size)
{
- int i, j;
+ int i, j, vq = sve_vq(env);
+ memset(sve, 0, sizeof(*sve));
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
__put_user(size, &sve->head.size);
__put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
+ }
/* Note that SVE regs are stored as a byte stream, with each byte element
* at a subsequent address. This corresponds to a little-endian store
@@ -200,6 +223,35 @@ static void target_setup_sve_record(struct target_sve_context *sve,
}
}
+static void target_setup_za_record(struct target_za_context *za,
+ CPUARMState *env, int size)
+{
+ int vq = sme_vq(env);
+ int vl = vq * TARGET_SVE_VQ_BYTES;
+ int i, j;
+
+ memset(za, 0, sizeof(*za));
+ __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+ __put_user(size, &za->head.size);
+ __put_user(vl, &za->vl);
+
+ if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+ return;
+ }
+ assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
+
+ /*
+ * Note that ZA vectors are stored as a byte stream,
+ * with each byte element at a subsequent address.
+ */
+ for (i = 0; i < vl; ++i) {
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+ for (j = 0; j < vq * 2; ++j) {
+ __put_user_e(env->zarray[i].d[j], z + j, le);
+ }
+ }
+}
+
static void target_restore_general_frame(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -243,12 +295,50 @@ static void target_restore_fpsimd_record(CPUARMState *env,
}
}
-static void target_restore_sve_record(CPUARMState *env,
- struct target_sve_context *sve, int vq)
+static bool target_restore_sve_record(CPUARMState *env,
+ struct target_sve_context *sve,
+ int size, int *svcr)
{
- int i, j;
+ int i, j, vl, vq, flags;
+ bool sm;
- /* Note that SVE regs are stored as a byte stream, with each byte element
+ __get_user(vl, &sve->vl);
+ __get_user(flags, &sve->flags);
+
+ sm = flags & TARGET_SVE_SIG_FLAG_SM;
+
+ /* The cpu must support Streaming or Non-streaming SVE. */
+ if (sm
+ ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
+ : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+ return false;
+ }
+
+ /*
+ * Note that we cannot use sve_vq() because that depends on the
+ * current setting of PSTATE.SM, not the state to be restored.
+ */
+ vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
+
+ /* Reject mismatched VL. */
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
+ return false;
+ }
+
+ /* Accept empty record -- used to clear PSTATE.SM. */
+ if (size <= sizeof(*sve)) {
+ return true;
+ }
+
+ /* Reject non-empty but incomplete record. */
+ if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
+ return false;
+ }
+
+ *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
+ /*
+ * Note that SVE regs are stored as a byte stream, with each byte element
* at a subsequent address. This corresponds to a little-endian load
* of our 64-bit hunks.
*/
@@ -270,6 +360,46 @@ static void target_restore_sve_record(CPUARMState *env,
}
}
}
+ return true;
+}
+
+static bool target_restore_za_record(CPUARMState *env,
+ struct target_za_context *za,
+ int size, int *svcr)
+{
+ int i, j, vl, vq;
+
+ if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ return false;
+ }
+
+ __get_user(vl, &za->vl);
+ vq = sme_vq(env);
+
+ /* Reject mismatched VL. */
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
+ return false;
+ }
+
+ /* Accept empty record -- used to clear PSTATE.ZA. */
+ if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+ return true;
+ }
+
+ /* Reject non-empty but incomplete record. */
+ if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+ return false;
+ }
+
+ *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+ for (i = 0; i < vl; ++i) {
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+ for (j = 0; j < vq * 2; ++j) {
+ __get_user_e(env->zarray[i].d[j], z + j, le);
+ }
+ }
+ return true;
}
static int target_restore_sigframe(CPUARMState *env,
@@ -278,10 +408,12 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_aarch64_ctx *ctx, *extra = NULL;
struct target_fpsimd_context *fpsimd = NULL;
struct target_sve_context *sve = NULL;
+ struct target_za_context *za = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
- bool err = false;
- int vq = 0, sve_size = 0;
+ int sve_size = 0;
+ int za_size = 0;
+ int svcr = 0;
target_restore_general_frame(env, sf);
@@ -294,8 +426,7 @@ static int target_restore_sigframe(CPUARMState *env,
switch (magic) {
case 0:
if (size != 0) {
- err = true;
- goto exit;
+ goto err;
}
if (used_extra) {
ctx = NULL;
@@ -307,42 +438,46 @@ static int target_restore_sigframe(CPUARMState *env,
case TARGET_FPSIMD_MAGIC:
if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
- err = true;
- goto exit;
+ goto err;
}
fpsimd = (struct target_fpsimd_context *)ctx;
break;
case TARGET_SVE_MAGIC:
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
- vq = sve_vq(env);
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
- if (!sve && size == sve_size) {
- sve = (struct target_sve_context *)ctx;
- break;
- }
+ if (sve || size < sizeof(struct target_sve_context)) {
+ goto err;
}
- err = true;
- goto exit;
+ sve = (struct target_sve_context *)ctx;
+ sve_size = size;
+ break;
+
+ case TARGET_ZA_MAGIC:
+ if (za || size < sizeof(struct target_za_context)) {
+ goto err;
+ }
+ za = (struct target_za_context *)ctx;
+ za_size = size;
+ break;
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
- err = true;
- goto exit;
+ goto err;
}
__get_user(extra_datap,
&((struct target_extra_context *)ctx)->datap);
__get_user(extra_size,
&((struct target_extra_context *)ctx)->size);
extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
+ if (!extra) {
+ return 1;
+ }
break;
default:
/* Unknown record -- we certainly didn't generate it.
* Did we in fact get out of sync?
*/
- err = true;
- goto exit;
+ goto err;
}
ctx = (void *)ctx + size;
}
@@ -351,17 +486,26 @@ static int target_restore_sigframe(CPUARMState *env,
if (fpsimd) {
target_restore_fpsimd_record(env, fpsimd);
} else {
- err = true;
+ goto err;
}
/* SVE data, if present, overwrites FPSIMD data. */
- if (sve) {
- target_restore_sve_record(env, sve, vq);
+ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
+ goto err;
+ }
+ if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+ goto err;
+ }
+ if (env->svcr != svcr) {
+ env->svcr = svcr;
+ arm_rebuild_hflags(env);
}
+ unlock_user(extra, extra_datap, 0);
+ return 0;
- exit:
+ err:
unlock_user(extra, extra_datap, 0);
- return err;
+ return 1;
}
static abi_ulong get_sigframe(struct target_sigaction *ka,
@@ -423,7 +567,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
.total_size = offsetof(struct target_rt_sigframe,
uc.tuc_mcontext.__reserved),
};
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+ int sve_size = 0, za_size = 0;
struct target_rt_sigframe *frame;
struct target_rt_frame_record *fr;
abi_ulong frame_addr, return_addr;
@@ -433,11 +578,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
/* SVE state needs saving only if it exists. */
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
- vq = sve_vq(env);
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+ cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
sve_ofs = alloc_sigframe_space(sve_size, &layout);
}
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ /* ZA state needs saving only if it is enabled. */
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
+ } else {
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+ }
+ za_ofs = alloc_sigframe_space(za_size, &layout);
+ }
if (layout.extra_ofs) {
/* Reserve space for the extra end marker. The standard end marker
@@ -484,7 +638,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
target_setup_end_record((void *)frame + layout.extra_end_ofs);
}
if (sve_ofs) {
- target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+ target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
+ }
+ if (za_ofs) {
+ target_setup_za_record((void *)frame + za_ofs, env, za_size);
}
/* Set up the stack frame for unwinding. */
@@ -508,6 +665,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
env->btype = 2;
}
+ /*
+ * Invoke the signal handler with both SM and ZA disabled.
+ * When clearing SM, ResetSVEState, per SMSTOP.
+ */
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ arm_reset_sve_state(env);
+ }
+ if (env->svcr) {
+ env->svcr = 0;
+ arm_rebuild_hflags(env);
+ }
+
if (info) {
tswap_siginfo(&frame->info, info);
env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
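
The TARGET_ZA_SIG_* macros above make the ZA record exactly SVL x SVL data bytes behind a header rounded up to 16 bytes. A standalone sketch of that arithmetic, mirroring the macros under the assumption TARGET_SVE_VQ_BYTES == 16:

#include <stdio.h>
#include <stddef.h>

#define VQ_BYTES 16   /* TARGET_SVE_VQ_BYTES */

/* Header: u32 magic + u32 size + u16 vl + 3 * u16 reserved = 16 bytes,
 * rounded up to a 16-byte boundary (TARGET_ZA_SIG_REGS_OFFSET). */
static size_t za_regs_offset(void)
{
    size_t hdr = 4 + 4 + 2 + 6;
    return (hdr + VQ_BYTES - 1) / VQ_BYTES * VQ_BYTES;
}

/* TARGET_ZA_SIG_CONTEXT_SIZE(vq): header plus vq*16 rows of vq*16 bytes. */
static size_t za_context_size(unsigned vq)
{
    return za_regs_offset() + (size_t)vq * VQ_BYTES * (vq * VQ_BYTES);
}

int main(void)
{
    for (unsigned vq = 1; vq <= 4; vq *= 2) {
        printf("vq=%u SVL=%3u -> ZA record %5zu bytes\n",
               vq, vq * VQ_BYTES, za_context_size(vq));
    }
    return 0;   /* 272, 1040 and 4112 bytes respectively */
}
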
diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h
index 97a477bd3e..f90359faf2 100644
--- a/linux-user/aarch64/target_cpu.h
+++ b/linux-user/aarch64/target_cpu.h
@@ -34,10 +34,13 @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags)
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
{
- /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
+ /*
+ * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
* different from AArch32 Linux, which uses TPIDRRO.
*/
env->cp15.tpidr_el[0] = newtls;
+ /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */
+ env->cp15.tpidr2_el0 = 0;
}
static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
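
The TPIDR2_EL0 clearing above is observable from guest code. A minimal aarch64-only sketch that reads the register back; it assumes an SME-capable CPU with EL0 access enabled (SCTLR_EL1.EnTP2, set in the cpu.c reset changes further down) and spells out the system-register encoding in case the assembler predates the TPIDR2_EL0 name:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t tpidr2;
    /* TPIDR2_EL0 is encoded as op0=3, op1=3, CRn=13, CRm=0, op2=5. */
    __asm__ volatile("mrs %0, S3_3_C13_C0_5" : "=r"(tpidr2));
    printf("TPIDR2_EL0 = 0x%llx\n", (unsigned long long)tpidr2);
    return 0;
}
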
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
index 1d440ffbea..907c314146 100644
--- a/linux-user/aarch64/target_prctl.h
+++ b/linux-user/aarch64/target_prctl.h
@@ -6,17 +6,18 @@
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H
-static abi_long do_prctl_get_vl(CPUArchState *env)
+static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
ARMCPU *cpu = env_archcpu(env);
if (cpu_isar_feature(aa64_sve, cpu)) {
+ /* PSTATE.SM is always unset on syscall entry. */
return sve_vq(env) * 16;
}
return -TARGET_EINVAL;
}
-#define do_prctl_get_vl do_prctl_get_vl
+#define do_prctl_sve_get_vl do_prctl_sve_get_vl
-static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
+static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
{
/*
* We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
@@ -27,6 +28,7 @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
uint32_t vq, old_vq;
+ /* PSTATE.SM is always unset on syscall entry. */
old_vq = sve_vq(env);
/*
@@ -47,7 +49,59 @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
}
return -TARGET_EINVAL;
}
-#define do_prctl_set_vl do_prctl_set_vl
+#define do_prctl_sve_set_vl do_prctl_sve_set_vl
+
+static abi_long do_prctl_sme_get_vl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ if (cpu_isar_feature(aa64_sme, cpu)) {
+ return sme_vq(env) * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_sme_get_vl do_prctl_sme_get_vl
+
+static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
+{
+ /*
+ * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
+ * i.e. VL=8192, even though the architectural maximum is VQ=16.
+ */
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+ int vq, old_vq;
+
+ old_vq = sme_vq(env);
+
+ /*
+ * Bound the value of vq, so that we know that it fits into
+ * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
+ * on syscall entry, we are not modifying the current SVE
+ * vector length.
+ */
+ vq = MAX(arg2 / 16, 1);
+ vq = MIN(vq, 16);
+ env->vfp.smcr_el[1] =
+ FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);
+
+ /* Delay rebuilding hflags until we know if ZA must change. */
+ vq = sve_vqm1_for_el_sm(env, 0, true) + 1;
+
+ if (vq != old_vq) {
+ /*
+ * PSTATE.ZA state is cleared on any change to SVL.
+ * We need not call arm_rebuild_hflags because PSTATE.SM was
+ * cleared on syscall entry, so this hasn't changed VL.
+ */
+ env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
+ arm_rebuild_hflags(env);
+ }
+ return vq * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_sme_set_vl do_prctl_sme_set_vl
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
{
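
do_prctl_sme_set_vl above deliberately accepts any kernel-valid length and then clamps it to what the 4-bit SMCR_EL1.LEN field can hold. A standalone sketch of just that clamp, with MAX/MIN spelled out:

#include <stdio.h>

/* Extract of the bounding logic: arg2 is a byte vector length in
 * [16, 8192]; the result is a vq in [1, 16], storable as vq - 1 in
 * the 4-bit SMCR_EL1.LEN field. */
static int clamp_sme_vq(long arg2)
{
    long vq = arg2 / 16;
    if (vq < 1) {
        vq = 1;
    }
    if (vq > 16) {
        vq = 16;
    }
    return (int)vq;
}

int main(void)
{
    long args[] = { 16, 64, 256, 8192 };
    for (int i = 0; i < 4; i++) {
        int vq = clamp_sme_vq(args[i]);
        printf("PR_SME_SET_VL arg %4ld -> vq %2d (VL %3d bytes)\n",
               args[i], vq, vq * 16);
    }
    return 0;   /* 8192 clamps to vq=16, i.e. VL=256 */
}
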
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 1de77c7959..ce902dbd56 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -605,6 +605,18 @@ enum {
ARM_HWCAP2_A64_RNG = 1 << 16,
ARM_HWCAP2_A64_BTI = 1 << 17,
ARM_HWCAP2_A64_MTE = 1 << 18,
+ ARM_HWCAP2_A64_ECV = 1 << 19,
+ ARM_HWCAP2_A64_AFP = 1 << 20,
+ ARM_HWCAP2_A64_RPRES = 1 << 21,
+ ARM_HWCAP2_A64_MTE3 = 1 << 22,
+ ARM_HWCAP2_A64_SME = 1 << 23,
+ ARM_HWCAP2_A64_SME_I16I64 = 1 << 24,
+ ARM_HWCAP2_A64_SME_F64F64 = 1 << 25,
+ ARM_HWCAP2_A64_SME_I8I32 = 1 << 26,
+ ARM_HWCAP2_A64_SME_F16F32 = 1 << 27,
+ ARM_HWCAP2_A64_SME_B16F32 = 1 << 28,
+ ARM_HWCAP2_A64_SME_F32F32 = 1 << 29,
+ ARM_HWCAP2_A64_SME_FA64 = 1 << 30,
};
#define ELF_HWCAP get_elf_hwcap()
@@ -674,6 +686,14 @@ static uint32_t get_elf_hwcap2(void)
GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
+ GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
+ ARM_HWCAP2_A64_SME_F32F32 |
+ ARM_HWCAP2_A64_SME_B16F32 |
+ ARM_HWCAP2_A64_SME_F16F32 |
+ ARM_HWCAP2_A64_SME_I8I32));
+ GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
+ GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
+ GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
return hwcaps;
}
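
Guest programs can observe the bits get_elf_hwcap2() now reports through the auxiliary vector. A minimal sketch using Linux's getauxval(), defining the bit locally in case the libc headers predate it:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SME
#define HWCAP2_SME (1 << 23)   /* matches ARM_HWCAP2_A64_SME above */
#endif

int main(void)
{
    unsigned long hwcap2 = getauxval(AT_HWCAP2);
    printf("SME: %s\n", (hwcap2 & HWCAP2_SME) ? "present" : "absent");
    return 0;
}
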
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 669add74c1..991b85e6b4 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6343,6 +6343,12 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
+#ifndef PR_SME_SET_VL
+# define PR_SME_SET_VL 63
+# define PR_SME_GET_VL 64
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17)
+#endif
#include "target_prctl.h"
@@ -6362,11 +6368,11 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
-#ifndef do_prctl_get_vl
-#define do_prctl_get_vl do_prctl_inval0
+#ifndef do_prctl_sve_get_vl
+#define do_prctl_sve_get_vl do_prctl_inval0
#endif
-#ifndef do_prctl_set_vl
-#define do_prctl_set_vl do_prctl_inval1
+#ifndef do_prctl_sve_set_vl
+#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
@@ -6383,6 +6389,12 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
+#ifndef do_prctl_sme_get_vl
+#define do_prctl_sme_get_vl do_prctl_inval0
+#endif
+#ifndef do_prctl_sme_set_vl
+#define do_prctl_sme_set_vl do_prctl_inval1
+#endif
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
abi_long arg3, abi_long arg4, abi_long arg5)
@@ -6431,9 +6443,13 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
case PR_SET_FP_MODE:
return do_prctl_set_fp_mode(env, arg2);
case PR_SVE_GET_VL:
- return do_prctl_get_vl(env);
+ return do_prctl_sve_get_vl(env);
case PR_SVE_SET_VL:
- return do_prctl_set_vl(env, arg2);
+ return do_prctl_sve_set_vl(env, arg2);
+ case PR_SME_GET_VL:
+ return do_prctl_sme_get_vl(env);
+ case PR_SME_SET_VL:
+ return do_prctl_sme_set_vl(env, arg2);
case PR_PAC_RESET_KEYS:
if (arg3 || arg4 || arg5) {
return -TARGET_EINVAL;
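
With the dispatch above in place, the new prctls are reachable from guest code. A minimal sketch, defining the option numbers locally (they match the fallback definitions above) for older headers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SME_SET_VL
#define PR_SME_SET_VL 63
#define PR_SME_GET_VL 64
#endif

int main(void)
{
    /* Request a 256-bit (32-byte) streaming vector length, then read
     * back what was granted; both calls fail with EINVAL without SME. */
    long set = prctl(PR_SME_SET_VL, 32L, 0L, 0L, 0L);
    long get = prctl(PR_SME_GET_VL, 0L, 0L, 0L, 0L);
    printf("PR_SME_SET_VL -> %ld, PR_SME_GET_VL -> %ld\n", set, get);
    return 0;
}
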
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index ae6dca2f01..5de7e097e9 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -204,13 +204,23 @@ static void arm_cpu_reset(DeviceState *dev)
/* and to the FP/Neon instructions */
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
CPACR_EL1, FPEN, 3);
- /* and to the SVE instructions */
- env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
- CPACR_EL1, ZEN, 3);
- /* with reasonable vector length */
+ /* and to the SVE instructions, with default vector length */
if (cpu_isar_feature(aa64_sve, cpu)) {
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+ CPACR_EL1, ZEN, 3);
env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
}
+ /* and for SME instructions, with default vector length, and TPIDR2 */
+ if (cpu_isar_feature(aa64_sme, cpu)) {
+ env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+ CPACR_EL1, SMEN, 3);
+ env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
+ if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
+ env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
+ SMCR, FA64, 1);
+ }
+ }
/*
* Enable 48-bit address space (TODO: take reserved_va into account).
* Enable TBI0 but not TBI1.
@@ -878,6 +888,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
int i;
int el = arm_current_el(env);
const char *ns_status;
+ bool sve;
qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
for (i = 0; i < 32; i++) {
@@ -904,6 +915,12 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
el,
psr & PSTATE_SP ? 'h' : 't');
+ if (cpu_isar_feature(aa64_sme, cpu)) {
+ qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c",
+ env->svcr,
+ (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
+ (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
+ }
if (cpu_isar_feature(aa64_bti, cpu)) {
qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
}
@@ -918,7 +935,15 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
vfp_get_fpcr(env), vfp_get_fpsr(env));
- if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+ if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
+ sve = sme_exception_el(env, el) == 0;
+ } else if (cpu_isar_feature(aa64_sve, cpu)) {
+ sve = sve_exception_el(env, el) == 0;
+ } else {
+ sve = false;
+ }
+
+ if (sve) {
int j, zcr_len = sve_vqm1_for_el(env, el);
for (i = 0; i <= FFR_PRED_NUM; i++) {
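
The reset and dump code above leans on QEMU's FIELD_EX64/FIELD_DP64 accessors from hw/registerfields.h. A rough standalone model of the extract/deposit they perform, using CPACR_EL1.SMEN (bits [25:24]) as the example field:

#include <stdint.h>
#include <stdio.h>

/* Extract len bits at shift -- what FIELD_EX64 does for a declared field. */
static uint64_t ex64(uint64_t reg, unsigned shift, unsigned len)
{
    return (reg >> shift) & ((1ULL << len) - 1);
}

/* Deposit val into len bits at shift -- what FIELD_DP64 does. */
static uint64_t dp64(uint64_t reg, unsigned shift, unsigned len, uint64_t val)
{
    uint64_t mask = ((1ULL << len) - 1) << shift;
    return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
    uint64_t cpacr = 0;
    cpacr = dp64(cpacr, 24, 2, 3);   /* SMEN = 3: no EL0/EL1 trapping */
    printf("cpacr=0x%llx SMEN=%llu\n",
           (unsigned long long)cpacr,
           (unsigned long long)ex64(cpacr, 24, 2));
    return 0;
}
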
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 1f4f3e0485..1e36a839ee 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3158,6 +3158,11 @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
* the same thing as the current security state of the processor!
*/
FIELD(TBFLAG_A32, NS, 10, 1)
+/*
+ * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
+ * This requires an SME trap from AArch32 mode when using NEON.
+ */
+FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
/*
* Bit usage when in AArch32 state, for M-profile only.
@@ -3195,6 +3200,8 @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
FIELD(TBFLAG_A64, SVL, 24, 4)
+/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
+FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
/*
* Helpers for using the above.
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index b4fd4b7ec8..78e27f778a 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -1024,6 +1024,7 @@ static void aarch64_max_initfn(Object *obj)
*/
t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */
+ t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */
t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
cpu->isar.id_aa64pfr1 = t;
@@ -1074,6 +1075,16 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
cpu->isar.id_aa64dfr0 = t;
+ t = cpu->isar.id_aa64smfr0;
+ t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */
+ t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
+ t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */
+ cpu->isar.id_aa64smfr0 = t;
+
/* Replicate the same data to the 32-bit id registers. */
aa32_max_features(cpu);
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index 3bd48c235f..d2d544a696 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -19,3 +19,129 @@
DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
+
+DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
+
+/* Move to/from vertical array slices, i.e. columns, so 'c'. */
+DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index dc629f851a..cc4e1d8948 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -325,6 +325,8 @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
@@ -717,6 +719,8 @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index e6f37e160f..cfcad97ce0 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6098,6 +6098,32 @@ int sme_exception_el(CPUARMState *env, int el)
return 0;
}
+/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
+static bool sme_fa64(CPUARMState *env, int el)
+{
+ if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
+ return false;
+ }
+
+ if (el <= 1 && !el_is_in_host(env, el)) {
+ if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
+ return false;
+ }
+ }
+ if (el <= 2 && arm_is_el2_enabled(env)) {
+ if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
+ return false;
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
* Given that SVE is enabled, return the vector length for EL.
*/
@@ -10801,6 +10827,20 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
}
+ /*
+ * The SME exception we are testing for is raised via
+ * AArch64.CheckFPAdvSIMDEnabled(), as called from
+ * AArch32.CheckAdvSIMDOrFPEnabled().
+ */
+ if (el == 0
+ && FIELD_EX64(env->svcr, SVCR, SM)
+ && (!arm_is_el2_enabled(env)
+ || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
+ && arm_el_is_aa64(env, 1)
+ && !sme_fa64(env, el)) {
+ DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
+ }
+
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
@@ -10850,6 +10890,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
if (FIELD_EX64(env->svcr, SVCR, SM)) {
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
+ DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
}
DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
}
@@ -11201,6 +11242,19 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
return;
}
+ old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+ new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+
+ /*
+ * Both AArch64.TakeException and AArch64.ExceptionReturn
+ * invoke ResetSVEState when taking an exception from, or
+ * returning to, AArch32 state when PSTATE.SM is enabled.
+ */
+ if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
+ arm_reset_sve_state(env);
+ return;
+ }
+
/*
* DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
* at ELx, or not available because the EL is in AArch32 state, then
@@ -11213,10 +11267,8 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
* we already have the correct register contents when encountering the
* vq0->vq0 transition between EL0->EL1.
*/
- old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
old_len = (old_a64 && !sve_exception_el(env, old_el)
? sve_vqm1_for_el(env, old_el) : 0);
- new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
new_len = (new_a64 && !sve_exception_el(env, new_el)
? sve_vqm1_for_el(env, new_el) : 0);
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 3a8ce42ab0..92f36d9dbb 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -1019,6 +1019,24 @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
#ifdef TARGET_AARCH64
#include "helper-a64.h"
#include "helper-sve.h"
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 43dc600547..87e911b27f 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -1,5 +1,7 @@
gen = [
decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+ decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
+ decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
@@ -50,6 +52,7 @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'sme_helper.c',
'translate-a64.c',
'translate-sve.c',
+ 'translate-sme.c',
))
arm_softmmu_ss = ss.source_set()
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
new file mode 100644
index 0000000000..47708ccc8d
--- /dev/null
+++ b/target/arm/sme-fa64.decode
@@ -0,0 +1,60 @@
+# AArch64 SME allowed instruction decoding
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# These patterns are taken from Appendix E1.1 of DDI0616 A.a,
+# Arm Architecture Reference Manual Supplement,
+# The Scalable Matrix Extension (SME), for Armv9-A
+
+{
+ [
+ OK 0-00 1110 0000 0001 0010 11-- ---- ---- # SMOV W|Xd,Vn.B[0]
+ OK 0-00 1110 0000 0010 0010 11-- ---- ---- # SMOV W|Xd,Vn.H[0]
+ OK 0100 1110 0000 0100 0010 11-- ---- ---- # SMOV Xd,Vn.S[0]
+ OK 0000 1110 0000 0001 0011 11-- ---- ---- # UMOV Wd,Vn.B[0]
+ OK 0000 1110 0000 0010 0011 11-- ---- ---- # UMOV Wd,Vn.H[0]
+ OK 0000 1110 0000 0100 0011 11-- ---- ---- # UMOV Wd,Vn.S[0]
+ OK 0100 1110 0000 1000 0011 11-- ---- ---- # UMOV Xd,Vn.D[0]
+ ]
+ FAIL 0--0 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD vector operations
+}
+
+{
+ [
+ OK 0101 1110 --1- ---- 11-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar)
+ OK 0101 1110 -10- ---- 00-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar, FP16)
+ OK 01-1 1110 1-10 0001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar)
+ OK 01-1 1110 1111 1001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar, FP16)
+ ]
+ FAIL 01-1 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD single-element operations
+}
+
+FAIL 0-00 110- ---- ---- ---- ---- ---- ---- # Advanced SIMD structure load/store
+FAIL 1100 1110 ---- ---- ---- ---- ---- ---- # Advanced SIMD cryptography extensions
+FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
+
+# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions
+# We don't actually need to include these, as the default is OK.
+# -001 111- ---- ---- ---- ---- ---- ---- # Scalar floating-point operations
+# --10 110- ---- ---- ---- ---- ---- ---- # Load/store pair of FP registers
+# --01 1100 ---- ---- ---- ---- ---- ---- # Load FP register (PC-relative literal)
+# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm)
+# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
+# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
new file mode 100644
index 0000000000..628804e37a
--- /dev/null
+++ b/target/arm/sme.decode
@@ -0,0 +1,88 @@
+# AArch64 SME instruction descriptions
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+### SME Misc
+
+ZERO 11000000 00 001 00000000000 imm:8
+
+### SME Move into/from Array
+
+%mova_rs 13:2 !function=plus_12
+&mova esz rs pg zr za_imm v:bool to_vec:bool
+
+MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \
+ &mova to_vec=0 rs=%mova_rs
+MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \
+ &mova to_vec=0 rs=%mova_rs esz=4
+
+MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
+ &mova to_vec=1 rs=%mova_rs
+MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
+ &mova to_vec=1 rs=%mova_rs esz=4
+
+### SME Memory
+
+&ldst esz rs pg rn rm za_imm v:bool st:bool
+
+LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
+ &ldst rs=%mova_rs
+LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
+ &ldst esz=4 rs=%mova_rs
+
+&ldstr rv rn imm
+@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \
+ &ldstr rv=%mova_rs
+
+LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
+STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
+
+### SME Add Vector to Array
+
+&adda zad zn pm pn
+@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
+@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
+
+ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
+ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
+ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
+ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
+
+### SME Outer Product
+
+&op zad zn zm pm pn sub:bool
+@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op
+@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op
+
+FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
+
+BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
+
+SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32
+SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32
+USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32
+UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32
+
+SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64
+SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64
+USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64
+UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64
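
decodetree compiles each named pattern above into a call to a trans_* callback, passing the extracted fields in an argument struct named for the format (&ldst yields arg_ldst). A schematic sketch of the callback shape for LDST1 -- the real body lives in translate-sme.c:

static bool trans_LDST1(DisasContext *s, arg_ldst *a)
{
    /*
     * Gate on SME being enabled with PSTATE.ZA set (gating helper
     * assumed), pick the tile slice from a->v / a->rs / a->za_imm,
     * form the address from a->rn and a->rm, then emit one of the
     * sme_ld1 / sme_st1 helpers from helper-sme.h based on
     * a->esz and a->st.
     */
    return true;
}
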
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index b215725594..f891306bb9 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -20,7 +20,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
+#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "qemu/int128.h"
+#include "fpu/softfloat.h"
+#include "vec_internal.h"
+#include "sve_ldst_internal.h"
/* ResetSVEState */
void arm_reset_sve_state(CPUARMState *env)
@@ -59,3 +66,1136 @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
memset(env->zarray, 0, sizeof(env->zarray));
}
}
+
+void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
+{
+ uint32_t i;
+
+ /*
+ * Special case clearing the entire ZA space.
+ * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any
+ * parts of the ZA storage outside of SVL.
+ */
+ if (imm == 0xff) {
+ memset(env->zarray, 0, sizeof(env->zarray));
+ return;
+ }
+
+ /*
+ * Recall that ZAnH.D[m] is spread across ZA[n+8*m],
+ * so each row is discontiguous within ZA[].
+ */
+ for (i = 0; i < svl; i++) {
+ if (imm & (1 << (i % 8))) {
+ memset(&env->zarray[i], 0, svl);
+ }
+ }
+}
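/*
 * Illustrative aside (not part of the patch): each bit k of imm selects
 * the rows with i % 8 == k, i.e. the 64-bit tile ZAkH.D. With svl = 32,
 * imm = 0x01 clears rows 0, 8, 16 and 24 only, while imm = 0xff takes
 * the memset fast path above and clears the entire array.
 */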
+
+
+/*
+ * When considering the ZA storage as an array of elements of
+ * type T, the index within that array of the Nth element of
+ * a vertical slice of a tile can be calculated like this,
+ * regardless of the size of type T. This is because the tiles
+ * are interleaved, so if type T is size N bytes then row 1 of
+ * the tile is N rows away from row 0. The division by N to
+ * convert a byte offset into an array index and the multiplication
+ * by N to convert from vslice-index-within-the-tile to
+ * the index within the ZA storage cancel out.
+ */
+#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg))
+
+/*
+ * When doing byte arithmetic on the ZA storage, the element
+ * byteoff bytes away in a tile vertical slice is always this
+ * many bytes away in the ZA storage, regardless of the
+ * size of the tile element, assuming that byteoff is a multiple
+ * of the element size. Again this is because of the interleaving
+ * of the tiles. For instance if we have 1 byte per element then
+ * each row of the ZA storage has one byte of the vslice data,
+ * and (counting from 0) byte 8 goes in row 8 of the storage
+ * at offset (8 * row-size-in-bytes).
+ * If we have 8 bytes per element then each row of the ZA storage
+ * has 8 bytes of the data, but there are 8 interleaved tiles and
+ * so byte 8 of the data goes into row 1 of the tile,
+ * which is again row 8 of the storage, so the offset is still
+ * (8 * row-size-in-bytes). Similarly for other element sizes.
+ */
+#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg))
+
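/*
 * Illustrative aside (not part of the patch): with 8-byte elements,
 * element N of a vertical slice sits in tile row N, which is ZA row
 * N * 8, at byte offset N * 8 * sizeof(ARMVectorReg) from the slice
 * base. Converting to a uint64_t index divides by 8 again, giving
 * N * sizeof(ARMVectorReg), which is exactly tile_vslice_index(N):
 * the element size cancels out.
 */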
+
+/*
+ * Move Zreg vector to ZArray column.
+ */
+#define DO_MOVA_C(NAME, TYPE, H) \
+void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \
+{ \
+ int i, oprsz = simd_oprsz(desc); \
+ for (i = 0; i < oprsz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \
+ } \
+ i += sizeof(TYPE); \
+ pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+DO_MOVA_C(sme_mova_cz_b, uint8_t, H1)
+DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2)
+DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4)
+
+void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc)
+{
+ int i, oprsz = simd_oprsz(desc) / 8;
+ uint8_t *pg = vg;
+ uint64_t *n = vn;
+ uint64_t *a = za;
+
+ for (i = 0; i < oprsz; i++) {
+ if (pg[H1(i)] & 1) {
+ a[tile_vslice_index(i)] = n[i];
+ }
+ }
+}
+
+void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc)
+{
+ int i, oprsz = simd_oprsz(desc) / 16;
+ uint16_t *pg = vg;
+ Int128 *n = vn;
+ Int128 *a = za;
+
+ /*
+ * Int128 is used here simply to copy 16 bytes, and to simplify
+ * the address arithmetic.
+ */
+ for (i = 0; i < oprsz; i++) {
+ if (pg[H2(i)] & 1) {
+ a[tile_vslice_index(i)] = n[i];
+ }
+ }
+}
+
+#undef DO_MOVA_C
+
+/*
+ * Move ZArray column to Zreg vector.
+ */
+#define DO_MOVA_Z(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \
+{ \
+ int i, oprsz = simd_oprsz(desc); \
+ for (i = 0; i < oprsz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \
+ } \
+ i += sizeof(TYPE); \
+ pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1)
+DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2)
+DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4)
+
+void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc)
+{
+ int i, oprsz = simd_oprsz(desc) / 8;
+ uint8_t *pg = vg;
+ uint64_t *d = vd;
+ uint64_t *a = za;
+
+ for (i = 0; i < oprsz; i++) {
+ if (pg[H1(i)] & 1) {
+ d[i] = a[tile_vslice_index(i)];
+ }
+ }
+}
+
+void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
+{
+ int i, oprsz = simd_oprsz(desc) / 16;
+ uint16_t *pg = vg;
+ Int128 *d = vd;
+ Int128 *a = za;
+
+ /*
+ * Int128 is used here simply to copy 16 bytes, and to simplify
+ * the address arithmetic.
+ */
+ for (i = 0; i < oprsz; i++) {
+ if (pg[H2(i)] & 1) {
+ d[i] = a[tile_vslice_index(i)];
+ }
+ }
+}
+
+#undef DO_MOVA_Z
+
+/*
+ * Clear elements in a tile slice comprising len bytes.
+ */
+
+typedef void ClearFn(void *ptr, size_t off, size_t len);
+
+static void clear_horizontal(void *ptr, size_t off, size_t len)
+{
+ memset(ptr + off, 0, len);
+}
+
+static void clear_vertical_b(void *vptr, size_t off, size_t len)
+{
+ for (size_t i = 0; i < len; ++i) {
+ *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+ }
+}
+
+static void clear_vertical_h(void *vptr, size_t off, size_t len)
+{
+ for (size_t i = 0; i < len; i += 2) {
+ *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+ }
+}
+
+static void clear_vertical_s(void *vptr, size_t off, size_t len)
+{
+ for (size_t i = 0; i < len; i += 4) {
+ *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+ }
+}
+
+static void clear_vertical_d(void *vptr, size_t off, size_t len)
+{
+ for (size_t i = 0; i < len; i += 8) {
+ *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+ }
+}
+
+static void clear_vertical_q(void *vptr, size_t off, size_t len)
+{
+ for (size_t i = 0; i < len; i += 16) {
+ memset(vptr + tile_vslice_offset(i + off), 0, 16);
+ }
+}
+
+/*
+ * Copy elements from an array into a tile slice comprising len bytes.
+ */
+
+typedef void CopyFn(void *dst, const void *src, size_t len);
+
+static void copy_horizontal(void *dst, const void *src, size_t len)
+{
+ memcpy(dst, src, len);
+}
+
+static void copy_vertical_b(void *vdst, const void *vsrc, size_t len)
+{
+ const uint8_t *src = vsrc;
+ uint8_t *dst = vdst;
+ size_t i;
+
+ for (i = 0; i < len; ++i) {
+ dst[tile_vslice_index(i)] = src[i];
+ }
+}
+
+static void copy_vertical_h(void *vdst, const void *vsrc, size_t len)
+{
+ const uint16_t *src = vsrc;
+ uint16_t *dst = vdst;
+ size_t i;
+
+ for (i = 0; i < len / 2; ++i) {
+ dst[tile_vslice_index(i)] = src[i];
+ }
+}
+
+static void copy_vertical_s(void *vdst, const void *vsrc, size_t len)
+{
+ const uint32_t *src = vsrc;
+ uint32_t *dst = vdst;
+ size_t i;
+
+ for (i = 0; i < len / 4; ++i) {
+ dst[tile_vslice_index(i)] = src[i];
+ }
+}
+
+static void copy_vertical_d(void *vdst, const void *vsrc, size_t len)
+{
+ const uint64_t *src = vsrc;
+ uint64_t *dst = vdst;
+ size_t i;
+
+ for (i = 0; i < len / 8; ++i) {
+ dst[tile_vslice_index(i)] = src[i];
+ }
+}
+
+static void copy_vertical_q(void *vdst, const void *vsrc, size_t len)
+{
+ for (size_t i = 0; i < len; i += 16) {
+ memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16);
+ }
+}
+
+/*
+ * Host and TLB primitives for vertical tile slice addressing.
+ */
+
+#define DO_LD(NAME, TYPE, HOST, TLB) \
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
+{ \
+ TYPE val = HOST(host); \
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
+} \
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
+ intptr_t off, target_ulong addr, uintptr_t ra) \
+{ \
+ TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
+}
+
+#define DO_ST(NAME, TYPE, HOST, TLB) \
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
+{ \
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
+ HOST(host, val); \
+} \
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
+ intptr_t off, target_ulong addr, uintptr_t ra) \
+{ \
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
+ TLB(env, useronly_clean_ptr(addr), val, ra); \
+}
+
+/*
+ * The ARMVectorReg elements are stored in host-endian 64-bit units.
+ * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
+ * corresponds to storing the two 64-bit pieces in little-endian order.
+ */
+#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
+{ \
+ uint64_t val0 = HOST(host), val1 = HOST(host + 8); \
+ uint64_t *ptr = za + off; \
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
+} \
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
+{ \
+ HNAME##_host(za, tile_vslice_offset(off), host); \
+} \
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
+ target_ulong addr, uintptr_t ra) \
+{ \
+ uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \
+ uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \
+ uint64_t *ptr = za + off; \
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
+} \
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
+ target_ulong addr, uintptr_t ra) \
+{ \
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
+}
+
+#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
+{ \
+ uint64_t *ptr = za + off; \
+ HOST(host, ptr[BE]); \
+ HOST(host + 8, ptr[!BE]); \
+} \
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
+{ \
+ HNAME##_host(za, tile_vslice_offset(off), host); \
+} \
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
+ target_ulong addr, uintptr_t ra) \
+{ \
+ uint64_t *ptr = za + off; \
+ TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \
+ TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \
+} \
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
+ target_ulong addr, uintptr_t ra) \
+{ \
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
+}
+
+DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra)
+DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra)
+DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra)
+DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra)
+DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra)
+DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra)
+DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra)
+
+DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra)
+DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra)
+
+DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra)
+DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra)
+DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra)
+DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra)
+DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra)
+DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra)
+DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra)
+
+DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra)
+DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra)
+
+#undef DO_LD
+#undef DO_ST
+#undef DO_LDQ
+#undef DO_STQ
+
+/*
+ * Common helper for all contiguous predicated loads.
+ */
+
+static inline QEMU_ALWAYS_INLINE
+void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
+ const int esz, uint32_t mtedesc, bool vertical,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn,
+ ClearFn *clr_fn,
+ CopyFn *cpy_fn)
+{
+ const intptr_t reg_max = simd_oprsz(desc);
+ const intptr_t esize = 1 << esz;
+ intptr_t reg_off, reg_last;
+ SVEContLdSt info;
+ void *host;
+ int flags;
+
+ /* Find the active elements. */
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
+ /* The entire predicate was false; no load occurs. */
+ clr_fn(za, 0, reg_max);
+ return;
+ }
+
+ /* Probe the page(s). Exit with exception for any invalid page. */
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
+
+ /* Handle watchpoints for all active elements. */
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
+ BP_MEM_READ, ra);
+
+ /*
+ * Handle mte checks for all active elements.
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
+ */
+ if (mtedesc) {
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
+ mtedesc, ra);
+ }
+
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ /*
+ * At least one page includes MMIO.
+ * Any bus operation can fail with cpu_transaction_failed,
+ * which for ARM will raise SyncExternal. Perform the load
+ * into scratch memory to preserve register state until the end.
+ */
+ ARMVectorReg scratch = { };
+
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[1];
+ if (reg_last < 0) {
+ reg_last = info.reg_off_split;
+ if (reg_last < 0) {
+ reg_last = info.reg_off_last[0];
+ }
+ }
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ tlb_fn(env, &scratch, reg_off, addr + reg_off, ra);
+ }
+ reg_off += esize;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+
+ cpy_fn(za, &scratch, reg_max);
+ return;
+#endif
+ }
+
+ /* The entire operation is in RAM, on valid pages. */
+
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[0];
+ host = info.page[0].host;
+
+ if (!vertical) {
+ memset(za, 0, reg_max);
+ } else if (reg_off) {
+ clr_fn(za, 0, reg_off);
+ }
+
+ while (reg_off <= reg_last) {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ host_fn(za, reg_off, host + reg_off);
+ } else if (vertical) {
+ clr_fn(za, reg_off, esize);
+ }
+ reg_off += esize;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ }
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ reg_off = info.reg_off_split;
+ if (unlikely(reg_off >= 0)) {
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
+ }
+
+ reg_off = info.reg_off_first[1];
+ if (unlikely(reg_off >= 0)) {
+ reg_last = info.reg_off_last[1];
+ host = info.page[1].host;
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ host_fn(za, reg_off, host + reg_off);
+ } else if (vertical) {
+ clr_fn(za, reg_off, esize);
+ }
+ reg_off += esize;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg,
+ target_ulong addr, uint32_t desc, uintptr_t ra,
+ const int esz, bool vertical,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn,
+ ClearFn *clr_fn,
+ CopyFn *cpy_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ int bit55 = extract64(addr, 55, 1);
+
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /* Perform gross MTE suppression early. */
+ if (!tbi_check(desc, bit55) ||
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ mtedesc = 0;
+ }
+
+ sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical,
+ host_fn, tlb_fn, clr_fn, cpy_fn);
+}
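+
+/*
+ * A sketch of the descriptor split assumed above: trans_LDST1 packs
+ * the MTE descriptor into the high bits, roughly
+ *     desc = simd_desc(svl, svl, mtedesc << SVE_MTEDESC_SHIFT);
+ * so the low SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT bits are the plain
+ * SVE descriptor recovered by the extract32 above.
+ */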
+
+#define DO_LD(L, END, ESZ) \
+void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
+ clear_horizontal, copy_horizontal); \
+} \
+void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
+ clear_vertical_##L, copy_vertical_##L); \
+} \
+void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
+ clear_horizontal, copy_horizontal); \
+} \
+void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
+ clear_vertical_##L, copy_vertical_##L); \
+}
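+
+/*
+ * Each DO_LD invocation below emits four helpers; for example
+ * DO_LD(h, _be, MO_16) provides sme_ld1h_be_h, sme_ld1h_be_v,
+ * sme_ld1h_be_h_mte and sme_ld1h_be_v_mte.
+ */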
+
+DO_LD(b, , MO_8)
+DO_LD(h, _be, MO_16)
+DO_LD(h, _le, MO_16)
+DO_LD(s, _be, MO_32)
+DO_LD(s, _le, MO_32)
+DO_LD(d, _be, MO_64)
+DO_LD(d, _le, MO_64)
+DO_LD(q, _be, MO_128)
+DO_LD(q, _le, MO_128)
+
+#undef DO_LD
+
+/*
+ * Common helper for all contiguous predicated stores.
+ */
+
+static inline QEMU_ALWAYS_INLINE
+void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
+ const int esz, uint32_t mtedesc, bool vertical,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const intptr_t reg_max = simd_oprsz(desc);
+ const intptr_t esize = 1 << esz;
+ intptr_t reg_off, reg_last;
+ SVEContLdSt info;
+ void *host;
+ int flags;
+
+ /* Find the active elements. */
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
+ /* The entire predicate was false; no store occurs. */
+ return;
+ }
+
+ /* Probe the page(s). Exit with exception for any invalid page. */
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra);
+
+ /* Handle watchpoints for all active elements. */
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
+ BP_MEM_WRITE, ra);
+
+ /*
+ * Handle mte checks for all active elements.
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
+ */
+ if (mtedesc) {
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
+ mtedesc, ra);
+ }
+
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ /*
+ * At least one page includes MMIO.
+ * Any bus operation can fail with cpu_transaction_failed,
+ * which for ARM will raise SyncExternal. We cannot avoid
+ * this fault and will leave with the store incomplete.
+ */
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[1];
+ if (reg_last < 0) {
+ reg_last = info.reg_off_split;
+ if (reg_last < 0) {
+ reg_last = info.reg_off_last[0];
+ }
+ }
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
+ }
+ reg_off += esize;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ return;
+#endif
+ }
+
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[0];
+ host = info.page[0].host;
+
+ while (reg_off <= reg_last) {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ host_fn(za, reg_off, host + reg_off);
+ }
+ reg_off += 1 << esz;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ }
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ reg_off = info.reg_off_split;
+ if (unlikely(reg_off >= 0)) {
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
+ }
+
+ reg_off = info.reg_off_first[1];
+ if (unlikely(reg_off >= 0)) {
+ reg_last = info.reg_off_last[1];
+ host = info.page[1].host;
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ host_fn(za, reg_off, host + reg_off);
+ }
+ reg_off += 1 << esz;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr,
+ uint32_t desc, uintptr_t ra, int esz, bool vertical,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ int bit55 = extract64(addr, 55, 1);
+
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /* Perform gross MTE suppression early. */
+ if (!tbi_check(desc, bit55) ||
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ mtedesc = 0;
+ }
+
+ sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc,
+ vertical, host_fn, tlb_fn);
+}
+
+#define DO_ST(L, END, ESZ) \
+void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
+ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
+} \
+void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
+ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
+} \
+void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
+ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
+} \
+void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
+ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
+}
+
+DO_ST(b, , MO_8)
+DO_ST(h, _be, MO_16)
+DO_ST(h, _le, MO_16)
+DO_ST(s, _be, MO_32)
+DO_ST(s, _le, MO_32)
+DO_ST(d, _be, MO_64)
+DO_ST(d, _le, MO_64)
+DO_ST(q, _be, MO_128)
+DO_ST(q, _le, MO_128)
+
+#undef DO_ST
+
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+ uint64_t *pn = vpn, *pm = vpm;
+ uint32_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ) {
+ uint64_t pa = pn[row >> 4];
+ do {
+ if (pa & 1) {
+ for (col = 0; col < oprsz; ) {
+ uint64_t pb = pm[col >> 4];
+ do {
+ if (pb & 1) {
+ zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
+ }
+ pb >>= 4;
+ } while (++col & 15);
+ }
+ }
+ pa >>= 4;
+ } while (++row & 15);
+ }
+}
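+
+/*
+ * Predicate stepping above, spelled out: each uint64_t of the
+ * predicate covers 64 vector bytes, i.e. 16 four-byte elements whose
+ * controlling bits sit at positions 0, 4, 8, ...; hence the
+ * pa >>= 4 step per element and the ++row & 15 inner-loop bound.
+ */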
+
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ uint8_t *pn = vpn, *pm = vpm;
+ uint64_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ++row) {
+ if (pn[H1(row)] & 1) {
+ for (col = 0; col < oprsz; ++col) {
+ if (pm[H1(col)] & 1) {
+ zda[tile_vslice_index(row) + col] += zn[col];
+ }
+ }
+ }
+ }
+}
+
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+ uint64_t *pn = vpn, *pm = vpm;
+ uint32_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ) {
+ uint64_t pa = pn[row >> 4];
+ do {
+ if (pa & 1) {
+ uint32_t zn_row = zn[H4(row)];
+ for (col = 0; col < oprsz; ) {
+ uint64_t pb = pm[col >> 4];
+ do {
+ if (pb & 1) {
+ zda[tile_vslice_index(row) + H4(col)] += zn_row;
+ }
+ pb >>= 4;
+ } while (++col & 15);
+ }
+ }
+ pa >>= 4;
+ } while (++row & 15);
+ }
+}
+
+void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ uint8_t *pn = vpn, *pm = vpm;
+ uint64_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ++row) {
+ if (pn[H1(row)] & 1) {
+ uint64_t zn_row = zn[row];
+ for (col = 0; col < oprsz; ++col) {
+ if (pm[H1(col)] & 1) {
+ zda[tile_vslice_index(row) + col] += zn_row;
+ }
+ }
+ }
+ }
+}
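+
+/*
+ * The two shapes differ only in which source element is broadcast:
+ * ADDHA accumulates zn[col] down each active column, while ADDVA
+ * accumulates zn[row] across each active row (the hoisted zn_row).
+ */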
+
+void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, void *vst, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_maxsz(desc);
+ uint32_t neg = simd_data(desc) << 31;
+ uint16_t *pn = vpn, *pm = vpm;
+ float_status fpst;
+
+ /*
+ * Make a copy of float_status because this operation does not
+ * update the cumulative fp exception status. It also produces
+ * default nans.
+ */
+ fpst = *(float_status *)vst;
+ set_default_nan_mode(true, &fpst);
+
+ for (row = 0; row < oprsz; ) {
+ uint16_t pa = pn[H2(row >> 4)];
+ do {
+ if (pa & 1) {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg;
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pb = pm[H2(col >> 4)];
+ do {
+ if (pb & 1) {
+ uint32_t *a = vza_row + H1_4(col);
+ uint32_t *m = vzm + H1_4(col);
+ *a = float32_muladd(n, *m, *a, 0, &fpst);
+ }
+ col += 4;
+ pb >>= 4;
+ } while (col & 15);
+ }
+ }
+ row += 4;
+ pa >>= 4;
+ } while (row & 15);
+ }
+}
+
+void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, void *vst, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ uint64_t neg = (uint64_t)simd_data(desc) << 63;
+ uint64_t *za = vza, *zn = vzn, *zm = vzm;
+ uint8_t *pn = vpn, *pm = vpm;
+ float_status fpst = *(float_status *)vst;
+
+ set_default_nan_mode(true, &fpst);
+
+ for (row = 0; row < oprsz; ++row) {
+ if (pn[H1(row)] & 1) {
+ uint64_t *za_row = &za[tile_vslice_index(row)];
+ uint64_t n = zn[row] ^ neg;
+
+ for (col = 0; col < oprsz; ++col) {
+ if (pm[H1(col)] & 1) {
+ uint64_t *a = &za_row[col];
+ *a = float64_muladd(n, zm[col], *a, 0, &fpst);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Alter PAIR as needed for controlling predicates being false,
+ * and for NEG on an enabled row element.
+ */
+static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
+{
+ /*
+ * The pseudocode uses a conditional negate after the conditional zero.
+ * It is simpler here to unconditionally negate before conditional zero.
+ */
+ pair ^= neg;
+ if (!(pg & 1)) {
+ pair &= 0xffff0000u;
+ }
+ if (!(pg & 4)) {
+ pair &= 0x0000ffffu;
+ }
+ return pair;
+}
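+
+/*
+ * Example: pg = 0b0101 keeps both fp16 halves, pg = 0b0100 zeroes the
+ * low half and keeps the high half, and neg = 0x80008000 flips the
+ * sign of each half before the predicate zeroing is applied.
+ */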
+
+static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
+ float_status *s_std, float_status *s_odd)
+{
+ float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
+ float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
+ float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
+ float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
+ float64 t64;
+ float32 t32;
+
+ /*
+ * The ARM pseudocode function FPDot performs both multiplies
+ * and the add with a single rounding operation. Emulate this
+ * by performing the first multiply in round-to-odd, then doing
+ * the second multiply as fused multiply-add, and rounding to
+ * float32 all in one step.
+ */
+ t64 = float64_mul(e1r, e2r, s_odd);
+ t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
+
+ /* This conversion is exact, because we've already rounded. */
+ t32 = float64_to_float32(t64, s_std);
+
+ /* The final accumulation step is not fused. */
+ return float32_add(sum, t32, s_std);
+}
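+
+/*
+ * Round-to-odd preserves the sticky information in the low mantissa
+ * bit of the first product, so the following float64r32 fused
+ * multiply-add rounds as if the whole two-element dot product had
+ * been computed exactly and rounded once to float32.
+ */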
+
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, void *vst, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_maxsz(desc);
+ uint32_t neg = simd_data(desc) * 0x80008000u;
+ uint16_t *pn = vpn, *pm = vpm;
+ float_status fpst_odd, fpst_std;
+
+ /*
+ * Make a copy of float_status because this operation does not
+ * update the cumulative fp exception status. It also produces
+ * default nans. Make a second copy with round-to-odd -- see above.
+ */
+ fpst_std = *(float_status *)vst;
+ set_default_nan_mode(true, &fpst_std);
+ fpst_odd = fpst_std;
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+ for (row = 0; row < oprsz; ) {
+ uint16_t prow = pn[H2(row >> 4)];
+ do {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+ n = f16mop_adj_pair(n, prow, neg);
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pcol = pm[H2(col >> 4)];
+ do {
+ if (prow & pcol & 0b0101) {
+ uint32_t *a = vza_row + H1_4(col);
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+ m = f16mop_adj_pair(m, pcol, 0);
+ *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
+ }
+ col += 4;
+ pcol >>= 4;
+ } while (col & 15);
+ }
+ row += 4;
+ prow >>= 4;
+ } while (row & 15);
+ }
+}
+
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_maxsz(desc);
+ uint32_t neg = simd_data(desc) * 0x80008000u;
+ uint16_t *pn = vpn, *pm = vpm;
+
+ for (row = 0; row < oprsz; ) {
+ uint16_t prow = pn[H2(row >> 4)];
+ do {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+ n = f16mop_adj_pair(n, prow, neg);
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pcol = pm[H2(col >> 4)];
+ do {
+ if (prow & pcol & 0b0101) {
+ uint32_t *a = vza_row + H1_4(col);
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+ m = f16mop_adj_pair(m, pcol, 0);
+ *a = bfdotadd(*a, n, m);
+ }
+ col += 4;
+ pcol >>= 4;
+ } while (col & 15);
+ }
+ row += 4;
+ prow >>= 4;
+ } while (row & 15);
+ }
+}
+
+typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool);
+
+static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm,
+ uint8_t *pn, uint8_t *pm,
+ uint32_t desc, IMOPFn *fn)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ bool neg = simd_data(desc);
+
+ for (row = 0; row < oprsz; ++row) {
+ uint8_t pa = pn[H1(row)];
+ uint64_t *za_row = &za[tile_vslice_index(row)];
+ uint64_t n = zn[row];
+
+ for (col = 0; col < oprsz; ++col) {
+ uint8_t pb = pm[H1(col)];
+ uint64_t *a = &za_row[col];
+
+ *a = fn(n, zm[col], *a, pa & pb, neg);
+ }
+ }
+}
+
+#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
+{ \
+ uint32_t sum0 = 0, sum1 = 0; \
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
+ n &= expand_pred_b(p); \
+ sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
+ sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \
+ sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
+ sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \
+ sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
+ sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \
+ sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
+ sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \
+ if (neg) { \
+ sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \
+ } else { \
+ sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \
+ } \
+ return ((uint64_t)sum1 << 32) | sum0; \
+}
+
+#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
+{ \
+ uint64_t sum = 0; \
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
+ n &= expand_pred_h(p); \
+ sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
+ sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
+ sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
+ sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
+ return neg ? a - sum : a + sum; \
+}
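+
+/*
+ * For example, DEF_IMOP_32(sumopa_s, int8_t, uint8_t) below builds
+ * each 32-bit accumulator lane from four signed(n) * unsigned(m)
+ * byte products, the inactive n bytes having been zeroed by the
+ * predicate expansion.
+ */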
+
+DEF_IMOP_32(smopa_s, int8_t, int8_t)
+DEF_IMOP_32(umopa_s, uint8_t, uint8_t)
+DEF_IMOP_32(sumopa_s, int8_t, uint8_t)
+DEF_IMOP_32(usmopa_s, uint8_t, int8_t)
+
+DEF_IMOP_64(smopa_d, int16_t, int16_t)
+DEF_IMOP_64(umopa_d, uint16_t, uint16_t)
+DEF_IMOP_64(sumopa_d, int16_t, uint16_t)
+DEF_IMOP_64(usmopa_d, uint16_t, int16_t)
+
+#define DEF_IMOPH(NAME) \
+ void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \
+ void *vpm, uint32_t desc) \
+ { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); }
+
+DEF_IMOPH(smopa_s)
+DEF_IMOPH(umopa_s)
+DEF_IMOPH(sumopa_s)
+DEF_IMOPH(usmopa_s)
+DEF_IMOPH(smopa_d)
+DEF_IMOPH(umopa_d)
+DEF_IMOPH(sumopa_d)
+DEF_IMOPH(usmopa_d)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index a54feb2f61..14b3a69c36 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -449,14 +449,17 @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
# SVE index generation (register start, register increment)
INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm
-### SVE Stack Allocation Group
+### SVE / Streaming SVE Stack Allocation Group
# SVE stack frame adjustment
ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6
+ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6
ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6
+ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6
# SVE stack frame size
RDVL 00000100 101 11111 01010 imm:s6 rd:5
+RDSVL 00000100 101 11111 01011 imm:s6 rd:5
### SVE Bitwise Shift - Unpredicated Group
@@ -649,6 +652,7 @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
+REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0
# SVE vector splice (predicated, destructive)
SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
@@ -1183,10 +1187,10 @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \
@rpri_load_msz nreg=0
# SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
-PRF 1000010 00 -1 ----- 0-- --- ----- 0 ----
+PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ----
# SVE 32-bit gather prefetch (vector plus immediate)
-PRF 1000010 -- 00 ----- 111 --- ----- 0 ----
+PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ----
# SVE contiguous prefetch (scalar plus immediate)
PRF 1000010 11 1- ----- 0-- --- ----- 0 ----
@@ -1223,13 +1227,13 @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
@rpri_g_load esz=3
# SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
-PRF 1100010 00 11 ----- 1-- --- ----- 0 ----
+PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ----
# SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets)
-PRF 1100010 00 -1 ----- 0-- --- ----- 0 ----
+PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ----
# SVE 64-bit gather prefetch (vector plus immediate)
-PRF 1100010 -- 00 ----- 111 --- ----- 0 ----
+PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ----
### SVE Memory Store Group
@@ -1671,3 +1675,28 @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
### SVE2 floating-point bfloat16 dot-product (indexed)
BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
+
+### SVE broadcast predicate element
+
+&psel esz pd pn pm rv imm
+%psel_rv 16:2 !function=plus_12
+%psel_imm_b 22:2 19:2
+%psel_imm_h 22:2 20:1
+%psel_imm_s 22:2
+%psel_imm_d 23:1
+@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \
+ &psel rv=%psel_rv
+
+PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \
+ @psel esz=0 imm=%psel_imm_b
+PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \
+ @psel esz=1 imm=%psel_imm_h
+PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \
+ @psel esz=2 imm=%psel_imm_s
+PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
+ @psel esz=3 imm=%psel_imm_d
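+
+# For example, the .H pattern concatenates bits [23:22] and [20] into
+# the 3-bit slice index immediate (%psel_imm_h), and %psel_rv adds 12
+# so the index register is one of W12-W15.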
+
+### SVE clamp
+
+SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm
+UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 0c6379e6e8..d6f7ef94fe 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -931,6 +931,22 @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)
DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)
+void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 2) {
+ if (pg[H1(i)] & 1) {
+ uint64_t n0 = n[i + 0];
+ uint64_t n1 = n[i + 1];
+ d[i + 0] = n1;
+ d[i + 1] = n0;
+ }
+ }
+}
+
DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
@@ -3565,6 +3581,18 @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
}
}
+void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm,
+ void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 16;
+ Int128 *d = vd, *n = vn, *m = vm;
+ uint16_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = (pg[H2(i)] & 1 ? n : m)[i];
+ }
+}
+
/* Two operand comparison controlled by a predicate.
* ??? It is very tempting to want to be able to expand this inline
* with x86 instructions, e.g.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index c86b97b1d4..b7b64f7358 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1155,7 +1155,7 @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
* unallocated-encoding checks (otherwise the syndrome information
* for the resulting exception will be incorrect).
*/
-static bool fp_access_check(DisasContext *s)
+static bool fp_access_check_only(DisasContext *s)
{
if (s->fp_excp_el) {
assert(!s->fp_access_checked);
@@ -1170,21 +1170,44 @@ static bool fp_access_check(DisasContext *s)
return true;
}
-/* Check that SVE access is enabled. If it is, return true.
+static bool fp_access_check(DisasContext *s)
+{
+ if (!fp_access_check_only(s)) {
+ return false;
+ }
+ if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_smetrap(SME_ET_Streaming, false));
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Check that SVE access is enabled. If it is, return true.
* If not, emit code to generate an appropriate exception and return false.
+ * This function corresponds to CheckSVEEnabled().
*/
bool sve_access_check(DisasContext *s)
{
- if (s->sve_excp_el) {
- assert(!s->sve_access_checked);
- s->sve_access_checked = true;
-
+ if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
+ assert(dc_isar_feature(aa64_sme, s));
+ if (!sme_sm_enabled_check(s)) {
+ goto fail_exit;
+ }
+ } else if (s->sve_excp_el) {
gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
syn_sve_access_trap(), s->sve_excp_el);
- return false;
+ goto fail_exit;
}
s->sve_access_checked = true;
return fp_access_check(s);
+
+ fail_exit:
+ /* Assert that we only raise one exception per instruction. */
+ assert(!s->sve_access_checked);
+ s->sve_access_checked = true;
+ return false;
}
/*
@@ -1203,6 +1226,40 @@ static bool sme_access_check(DisasContext *s)
return true;
}
+/* This function corresponds to CheckSMEEnabled. */
+bool sme_enabled_check(DisasContext *s)
+{
+ /*
+ * Note that unlike sve_excp_el, we have not constrained sme_excp_el
+ * to be zero when fp_excp_el has priority. This is because we need
+ * sme_excp_el by itself for cpregs access checks.
+ */
+ if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
+ s->fp_access_checked = true;
+ return sme_access_check(s);
+ }
+ return fp_access_check_only(s);
+}
+
+/* Common subroutine for CheckSMEAnd*Enabled. */
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
+{
+ if (!sme_enabled_check(s)) {
+ return false;
+ }
+ if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_smetrap(SME_ET_NotStreaming, false));
+ return false;
+ }
+ if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_smetrap(SME_ET_InactiveZA, false));
+ return false;
+ }
+ return true;
+}
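+
+/*
+ * Callers pass the SVCR bits an instruction requires; e.g. the
+ * sme_smza_enabled_check() wrapper in translate-a64.h requests
+ * R_SVCR_SM_MASK | R_SVCR_ZA_MASK, checking both streaming mode and
+ * an active ZA before any code touching ZA is emitted.
+ */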
+
/*
* This utility function is for doing register extension with an
* optional shift. You will likely want to pass a temporary for the
@@ -1994,7 +2051,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
default:
g_assert_not_reached();
}
- if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
+ if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
return;
} else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
return;
@@ -14530,6 +14587,23 @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
}
}
+/*
+ * Include the generated SME FA64 decoder.
+ */
+
+#include "decode-sme-fa64.c.inc"
+
+static bool trans_OK(DisasContext *s, arg_OK *a)
+{
+ return true;
+}
+
+static bool trans_FAIL(DisasContext *s, arg_OK *a)
+{
+ s->is_nonstreaming = true;
+ return true;
+}
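+
+/*
+ * disas_sme_fa64() runs each instruction through the generated
+ * sme-fa64.decode patterns: anything matching a FAIL pattern lands
+ * in trans_FAIL and is flagged non-streaming, letting
+ * fp_access_check() raise the SME_ET_Streaming trap while the CPU is
+ * in streaming mode.
+ */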
+
/**
* is_guarded_page:
* @env: The cpu environment
@@ -14657,6 +14731,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
+ dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = arm_cpu->cp_regs;
@@ -14805,8 +14880,18 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
}
+ s->is_nonstreaming = false;
+ if (s->sme_trap_nonstreaming) {
+ disas_sme_fa64(s, insn);
+ }
+
switch (extract32(insn, 25, 4)) {
- case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+ case 0x0:
+ if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
+ unallocated_encoding(s);
+ }
+ break;
+ case 0x1: case 0x3: /* UNALLOCATED */
unallocated_encoding(s);
break;
case 0x2:
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index f0970c6b8c..ad3762d1ac 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -29,6 +29,27 @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
unsigned int imms, unsigned int immr);
bool sve_access_check(DisasContext *s);
+bool sme_enabled_check(DisasContext *s);
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);
+
+/* This function corresponds to CheckStreamingSVEEnabled. */
+static inline bool sme_sm_enabled_check(DisasContext *s)
+{
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK);
+}
+
+/* This function corresponds to CheckSMEAndZAEnabled. */
+static inline bool sme_za_enabled_check(DisasContext *s)
+{
+ return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK);
+}
+
+/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */
+static inline bool sme_smza_enabled_check(DisasContext *s)
+{
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
+}
+
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
bool tag_checked, int log2_size);
@@ -107,6 +128,12 @@ static inline int vec_full_reg_size(DisasContext *s)
return s->vl;
}
+/* Return the byte size of the vector register, SVL / 8. */
+static inline int streaming_vec_reg_size(DisasContext *s)
+{
+ return s->svl;
+}
+
/*
* Return the offset into CPUARMState of the predicate vector register Pn.
* Note for this purpose, FFR is P16.
@@ -122,6 +149,12 @@ static inline int pred_full_reg_size(DisasContext *s)
return s->vl >> 3;
}
+/* Return the byte size of the predicate register, SVL / 64. */
+static inline int streaming_pred_reg_size(DisasContext *s)
+{
+ return s->svl >> 3;
+}
+
/*
* Round up the size of a register to a size allowed by
* the tcg vector infrastructure. Any operation which uses this
@@ -145,7 +178,16 @@ static inline int pred_gvec_reg_size(DisasContext *s)
return size_for_gvec(pred_full_reg_size(s));
}
+/* Return a newly allocated pointer to the predicate register. */
+static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
+{
+ TCGv_ptr ret = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
+ return ret;
+}
+
bool disas_sve(DisasContext *, uint32_t);
+bool disas_sme(DisasContext *, uint32_t);
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
@@ -153,4 +195,7 @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, int64_t shift,
uint32_t opr_sz, uint32_t max_sz);
+void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+
#endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
new file mode 100644
index 0000000000..7b87a9df63
--- /dev/null
+++ b/target/arm/translate-sme.c
@@ -0,0 +1,373 @@
+/*
+ * AArch64 SME translation
+ *
+ * Copyright (c) 2022 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "translate.h"
+#include "exec/helper-gen.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sme.c.inc"
+
+
+/*
+ * Resolve tile.size[index] to a host pointer, where tile and index
+ * are always decoded together, dependent on the element size.
+ */
+static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
+ int tile_index, bool vertical)
+{
+ int tile = tile_index >> (4 - esz);
+ int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz);
+ int pos, len, offset;
+ TCGv_i32 tmp;
+ TCGv_ptr addr;
+
+ /* Compute the final index, which is Rs+imm. */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs));
+ tcg_gen_addi_i32(tmp, tmp, index);
+
+ /* Prepare a power-of-two modulo via extraction of @len bits. */
+ len = ctz32(streaming_vec_reg_size(s)) - esz;
+
+ if (vertical) {
+ /*
+ * Compute the byte offset of the index within the tile:
+ * (index % (svl / size)) * size
+ * = (index % (svl >> esz)) << esz
+ * Perform the power-of-two modulo via extraction of the low @len bits.
+ * Perform the multiply by shifting left by @pos bits.
+ * Perform these operations simultaneously via deposit into zero.
+ */
+ pos = esz;
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
+
+ /*
+ * For big-endian, adjust the indexed column byte offset within
+ * the uint64_t host words that make up env->zarray[].
+ */
+ if (HOST_BIG_ENDIAN && esz < MO_64) {
+ tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz));
+ }
+ } else {
+ /*
+ * Compute the byte offset of the index within the tile:
+ * (index % (svl / size)) * (size * sizeof(row))
+ * = (index % (svl >> esz)) << (esz + log2(sizeof(row)))
+ */
+ pos = esz + ctz32(sizeof(ARMVectorReg));
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
+
+ /* Row slices are always aligned and need no endian adjustment. */
+ }
+
+ /* The tile byte offset within env->zarray is the row. */
+ offset = tile * sizeof(ARMVectorReg);
+
+ /* Include the byte offset of zarray to make this relative to env. */
+ offset += offsetof(CPUARMState, zarray);
+ tcg_gen_addi_i32(tmp, tmp, offset);
+
+ /* Add the byte offset to env to produce the final pointer. */
+ addr = tcg_temp_new_ptr();
+ tcg_gen_ext_i32_ptr(addr, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_add_ptr(addr, addr, cpu_env);
+
+ return addr;
+}
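+
+/*
+ * Concrete numbers (a sketch assuming SVL = 512 bits, esz = MO_32):
+ * svl is 64 bytes, so len = ctz32(64) - 2 = 4; the vertical case then
+ * emits tcg_gen_deposit_z_i32(tmp, tmp, 2, 4), computing
+ * ((Rs + imm) % 16) << 2 -- the index modulo the 16 elements in a
+ * slice, scaled to a byte offset.
+ */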
+
+static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
+{
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (sme_za_enabled_check(s)) {
+ gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
+ tcg_constant_i32(streaming_vec_reg_size(s)));
+ }
+ return true;
+}
+
+static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
+{
+ static gen_helper_gvec_4 * const h_fns[5] = {
+ gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
+ gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d,
+ gen_helper_sve_sel_zpzz_q
+ };
+ static gen_helper_gvec_3 * const cz_fns[5] = {
+ gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h,
+ gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d,
+ gen_helper_sme_mova_cz_q,
+ };
+ static gen_helper_gvec_3 * const zc_fns[5] = {
+ gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h,
+ gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d,
+ gen_helper_sme_mova_zc_q,
+ };
+
+ TCGv_ptr t_za, t_zr, t_pg;
+ TCGv_i32 t_desc;
+ int svl;
+
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+ t_zr = vec_full_reg_ptr(s, a->zr);
+ t_pg = pred_full_reg_ptr(s, a->pg);
+
+ svl = streaming_vec_reg_size(s);
+ t_desc = tcg_constant_i32(simd_desc(svl, svl, 0));
+
+ if (a->v) {
+ /* Vertical slice -- use sme mova helpers. */
+ if (a->to_vec) {
+ zc_fns[a->esz](t_zr, t_za, t_pg, t_desc);
+ } else {
+ cz_fns[a->esz](t_za, t_zr, t_pg, t_desc);
+ }
+ } else {
+ /* Horizontal slice -- reuse sve sel helpers. */
+ if (a->to_vec) {
+ h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc);
+ } else {
+ h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc);
+ }
+ }
+
+ tcg_temp_free_ptr(t_za);
+ tcg_temp_free_ptr(t_zr);
+ tcg_temp_free_ptr(t_pg);
+
+ return true;
+}
+
+static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
+{
+ typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32);
+
+ /*
+ * Indexed by [esz][be][v][mte][st], which is (except for load/store)
+ * also the order in which the elements appear in the function names,
+ * and so how we must concatenate the pieces.
+ */
+
+#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F }
+#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) }
+#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) }
+#define FN_END(L, B) { FN_HV(L), FN_HV(B) }
+
+ static GenLdSt1 * const fns[5][2][2][2][2] = {
+ FN_END(b, b),
+ FN_END(h_le, h_be),
+ FN_END(s_le, s_be),
+ FN_END(d_le, d_be),
+ FN_END(q_le, q_be),
+ };
+
+#undef FN_LS
+#undef FN_MTE
+#undef FN_HV
+#undef FN_END
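+
+ /*
+ * For example, fns[MO_16][1][0][1][0] resolves to
+ * gen_helper_sme_ld1h_be_h_mte: a big-endian halfword load of a
+ * horizontal slice with MTE checks enabled.
+ */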
+
+ TCGv_ptr t_za, t_pg;
+ TCGv_i64 addr;
+ int svl, desc = 0;
+ bool be = s->be_data == MO_BE;
+ bool mte = s->mte_active[0];
+
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+ t_pg = pred_full_reg_ptr(s, a->pg);
+ addr = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+
+ if (mte) {
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1);
+ desc <<= SVE_MTEDESC_SHIFT;
+ } else {
+ addr = clean_data_tbi(s, addr);
+ }
+ svl = streaming_vec_reg_size(s);
+ desc = simd_desc(svl, svl, desc);
+
+ fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
+ tcg_constant_i32(desc));
+
+ tcg_temp_free_ptr(t_za);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_i64(addr);
+ return true;
+}
+
+typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int);
+
+static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ int imm = a->imm;
+ TCGv_ptr base;
+
+ if (!sme_za_enabled_check(s)) {
+ return true;
+ }
+
+ /* ZA[n] equates to ZA0H.B[n]. */
+ base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
+
+ fn(s, base, 0, svl, a->rn, imm * svl);
+
+ tcg_temp_free_ptr(base);
+ return true;
+}
+
+TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
+TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
+
+static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
+ gen_helper_gvec_4 *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, 0);
+ TCGv_ptr za, zn, pn, pm;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ /* Sum XZR+zad to find ZAd. */
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
+ zn = vec_full_reg_ptr(s, a->zn);
+ pn = pred_full_reg_ptr(s, a->pn);
+ pm = pred_full_reg_ptr(s, a->pm);
+
+ fn(za, zn, pn, pm, tcg_constant_i32(desc));
+
+ tcg_temp_free_ptr(za);
+ tcg_temp_free_ptr(zn);
+ tcg_temp_free_ptr(pn);
+ tcg_temp_free_ptr(pm);
+ return true;
+}
+
+TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
+TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
+TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
+TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
+
+static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz,
+ gen_helper_gvec_5 *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, a->sub);
+ TCGv_ptr za, zn, zm, pn, pm;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ /* Sum XZR+zad to find ZAd. */
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
+ zn = vec_full_reg_ptr(s, a->zn);
+ zm = vec_full_reg_ptr(s, a->zm);
+ pn = pred_full_reg_ptr(s, a->pn);
+ pm = pred_full_reg_ptr(s, a->pm);
+
+ fn(za, zn, zm, pn, pm, tcg_constant_i32(desc));
+
+ tcg_temp_free_ptr(za);
+ tcg_temp_free_ptr(zn);
+ tcg_temp_free_ptr(zm);
+ tcg_temp_free_ptr(pn);
+ tcg_temp_free_ptr(pm);
+ return true;
+}
+
+static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
+ gen_helper_gvec_5_ptr *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, a->sub);
+ TCGv_ptr za, zn, zm, pn, pm, fpst;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ /* Sum XZR+zad to find ZAd. */
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
+ zn = vec_full_reg_ptr(s, a->zn);
+ zm = vec_full_reg_ptr(s, a->zm);
+ pn = pred_full_reg_ptr(s, a->pn);
+ pm = pred_full_reg_ptr(s, a->pm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+
+ fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc));
+
+ tcg_temp_free_ptr(za);
+ tcg_temp_free_ptr(zn);
+ tcg_temp_free_ptr(zm);
+ tcg_temp_free_ptr(pn);
+ tcg_temp_free_ptr(pm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h)
+TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
+TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
+
+/* TODO: FEAT_EBF16 */
+TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
+
+TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
+TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
+TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s)
+TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s)
+
+TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d)
+TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d)
+TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d)
+TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 62b5f3040c..41f8b12259 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -1286,6 +1286,19 @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
return true;
}
+static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
+{
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (sme_enabled_check(s)) {
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
+ }
+ return true;
+}
+
static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
if (!dc_isar_feature(aa64_sve, s)) {
@@ -1299,6 +1312,19 @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
return true;
}
+static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
+{
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (sme_enabled_check(s)) {
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
+ }
+ return true;
+}
+
static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
if (!dc_isar_feature(aa64_sve, s)) {
@@ -1311,6 +1337,18 @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
return true;
}
+static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
+{
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (sme_enabled_check(s)) {
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+ tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
+ }
+ return true;
+}
+
/*
*** SVE Compute Vector Address Group
*/
@@ -1320,10 +1358,10 @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
}
-TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
-TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
-TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
-TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
+TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
+TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
+TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
+TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
/*
*** SVE Integer Misc - Unpredicated Group
@@ -1333,14 +1371,15 @@ static gen_helper_gvec_2 * const fexpa_fns[4] = {
NULL, gen_helper_sve_fexpa_h,
gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
};
-TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
- fexpa_fns[a->esz], a->rd, a->rn, 0)
+TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
+ fexpa_fns[a->esz], a->rd, a->rn, 0)
static gen_helper_gvec_3 * const ftssel_fns[4] = {
NULL, gen_helper_sve_ftssel_h,
gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
};
-TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
+ ftssel_fns[a->esz], a, 0)
/*
*** SVE Predicate Logical Operations Group
@@ -1785,7 +1824,8 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
/* Note pat == 31 is #all, to set all elements. */
-TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)
+TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
+ do_predset, 0, FFR_PRED_NUM, 31, false)
/* Note pat == 32 is #unimp, to set no elements. */
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
@@ -1799,11 +1839,13 @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
.rd = a->rd, .pg = a->pg, .s = a->s,
.rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
};
+
+ s->is_nonstreaming = true;
return trans_AND_pppp(s, &alt_a);
}
-TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
-TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
+TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
+TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
void (*gen_fn)(TCGv_i32, TCGv_ptr,
@@ -2533,7 +2575,8 @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
static gen_helper_gvec_3 * const compact_fns[4] = {
NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
};
-TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz,
+ compact_fns[a->esz], a, 0)
/* Call the helper that computes the ARM LastActiveElement pseudocode
* function, scaled by the element size. This includes the not found
@@ -2858,6 +2901,8 @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)
+TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
+
TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
gen_helper_sve_splice, a, a->esz)
@@ -3856,9 +3901,9 @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
NULL, gen_helper_sve_ftmad_h,
gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
};
-TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
- ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
+ ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
+ a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
/*
*** SVE Floating Point Accumulating Reduction Group
@@ -3881,6 +3926,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
@@ -3918,12 +3964,18 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
DO_FP3(FADD_zzz, fadd)
DO_FP3(FSUB_zzz, fsub)
DO_FP3(FMUL_zzz, fmul)
-DO_FP3(FTSMUL, ftsmul)
DO_FP3(FRECPS, recps)
DO_FP3(FRSQRTS, rsqrts)
#undef DO_FP3
+static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = {
+ NULL, gen_helper_gvec_ftsmul_h,
+ gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d
+};
+TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
+ ftsmul_fns[a->esz], a, 0)
+
/*
*** SVE Floating Point Arithmetic - Predicated Group
*/
@@ -4256,7 +4308,8 @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
* The load should begin at the address Rn + IMM.
*/
-static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
+ int len, int rn, int imm)
{
int len_align = QEMU_ALIGN_DOWN(len, 8);
int len_remain = len % 8;
@@ -4282,7 +4335,7 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
- tcg_gen_st_i64(t0, cpu_env, vofs + i);
+ tcg_gen_st_i64(t0, base, vofs + i);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
tcg_temp_free_i64(t0);
@@ -4295,6 +4348,12 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
clean_addr = new_tmp_a64_local(s);
tcg_gen_mov_i64(clean_addr, t0);
+ if (base != cpu_env) {
+ TCGv_ptr b = tcg_temp_local_new_ptr();
+ tcg_gen_mov_ptr(b, base);
+ base = b;
+ }
+
gen_set_label(loop);
t0 = tcg_temp_new_i64();
@@ -4302,7 +4361,7 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
tp = tcg_temp_new_ptr();
- tcg_gen_add_ptr(tp, cpu_env, i);
+ tcg_gen_add_ptr(tp, base, i);
tcg_gen_addi_ptr(i, i, 8);
tcg_gen_st_i64(t0, tp, vofs);
tcg_temp_free_ptr(tp);
@@ -4310,6 +4369,11 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
tcg_temp_free_ptr(i);
+
+ if (base != cpu_env) {
+ tcg_temp_free_ptr(base);
+ assert(len_remain == 0);
+ }
}
/*
@@ -4338,13 +4402,14 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
default:
g_assert_not_reached();
}
- tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+ tcg_gen_st_i64(t0, base, vofs + len_align);
tcg_temp_free_i64(t0);
}
}
/* Similarly for stores. */
-static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
+ int len, int rn, int imm)
{
int len_align = QEMU_ALIGN_DOWN(len, 8);
int len_remain = len % 8;
@@ -4370,7 +4435,7 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, vofs + i);
+ tcg_gen_ld_i64(t0, base, vofs + i);
tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
@@ -4384,11 +4449,17 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
clean_addr = new_tmp_a64_local(s);
tcg_gen_mov_i64(clean_addr, t0);
+ if (base != cpu_env) {
+ TCGv_ptr b = tcg_temp_local_new_ptr();
+ tcg_gen_mov_ptr(b, base);
+ base = b;
+ }
+
gen_set_label(loop);
t0 = tcg_temp_new_i64();
tp = tcg_temp_new_ptr();
- tcg_gen_add_ptr(tp, cpu_env, i);
+ tcg_gen_add_ptr(tp, base, i);
tcg_gen_ld_i64(t0, tp, vofs);
tcg_gen_addi_ptr(i, i, 8);
tcg_temp_free_ptr(tp);
@@ -4399,12 +4470,17 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
tcg_temp_free_ptr(i);
+
+ if (base != cpu_env) {
+ tcg_temp_free_ptr(base);
+ assert(len_remain == 0);
+ }
}
/* Predicate register stores can be any multiple of 2. */
if (len_remain) {
t0 = tcg_temp_new_i64();
- tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
+ tcg_gen_ld_i64(t0, base, vofs + len_align);
switch (len_remain) {
case 2:
@@ -4436,7 +4512,7 @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- do_ldr(s, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4449,7 +4525,7 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- do_ldr(s, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4462,7 +4538,7 @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- do_str(s, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4475,7 +4551,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- do_str(s, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -4793,6 +4869,7 @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (sve_access_check(s)) {
TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
@@ -4894,6 +4971,7 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (sve_access_check(s)) {
int vsz = vec_full_reg_size(s);
int elements = vsz >> dtype_esz[a->dtype];
@@ -5048,6 +5126,7 @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
if (a->rm == 31) {
return false;
}
+ s->is_nonstreaming = true;
if (sve_access_check(s)) {
TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
@@ -5062,6 +5141,7 @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
if (!dc_isar_feature(aa64_sve_f64mm, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (sve_access_check(s)) {
TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
@@ -5657,6 +5737,7 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
@@ -5688,6 +5769,7 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
@@ -5722,6 +5804,7 @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
if (!dc_isar_feature(aa64_sve2, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
@@ -5845,6 +5928,7 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
@@ -5875,6 +5959,7 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
@@ -5909,6 +5994,7 @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
if (!dc_isar_feature(aa64_sve2, s)) {
return false;
}
+ s->is_nonstreaming = true;
if (!sve_access_check(s)) {
return true;
}
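/*
 * Each hunk above tags a gather/scatter or first-fault/no-fault load
 * form as non-streaming before the access check runs. A sketch of how
 * these flags are presumably consumed inside sve_access_check()
 * elsewhere in this series (an assumption; the check itself is not in
 * this hunk):
 */
if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_smetrap(SME_ET_Streaming, false));
    /* insn is rejected; no SVE code is emitted for it */
}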
@@ -5953,6 +6039,17 @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
return true;
}
+static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
+{
+ if (!dc_isar_feature(aa64_sve, s)) {
+ return false;
+ }
+ /* Prefetch is a nop within QEMU. */
+ s->is_nonstreaming = true;
+ (void)sve_access_check(s);
+ return true;
+}
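/*
 * The (void) cast documents a deliberately ignored result: the access
 * check still runs for its side effects (it may emit trapping code),
 * but a prefetch emits nothing whether it passes or fails. Equivalent
 * sketch:
 */
if (sve_access_check(s)) {
    ;   /* nothing to emit: a prefetch is only a hint */
}
return true;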
+
/*
* Move Prefix
*
@@ -6181,9 +6278,13 @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
NULL, gen_helper_sve2_pmull_d,
};
- if (a->esz == 0
- ? !dc_isar_feature(aa64_sve2_pmull128, s)
- : !dc_isar_feature(aa64_sve, s)) {
+
+ if (a->esz == 0) {
+ if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
+ return false;
+ }
+ s->is_nonstreaming = true;
+ } else if (!dc_isar_feature(aa64_sve, s)) {
return false;
}
return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
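/*
 * The ternary is unfolded so that only the esz == 0 case, the 128-bit
 * PMULL gated by aa64_sve2_pmull128, marks the insn non-streaming;
 * presumably only that form is on SME's non-streaming list, while the
 * smaller element sizes stay streaming-compatible. For reference, the
 * removed condition was:
 *
 *   if (a->esz == 0
 *       ? !dc_isar_feature(aa64_sve2_pmull128, s)
 *       : !dc_isar_feature(aa64_sve, s)) {
 *       return false;
 *   }
 */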
@@ -6371,22 +6472,22 @@ static gen_helper_gvec_3 * const bext_fns[4] = {
gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
};
-TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
- bext_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+ bext_fns[a->esz], a, 0)
static gen_helper_gvec_3 * const bdep_fns[4] = {
gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
};
-TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
- bdep_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+ bdep_fns[a->esz], a, 0)
static gen_helper_gvec_3 * const bgrp_fns[4] = {
gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
};
-TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
- bgrp_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+ bgrp_fns[a->esz], a, 0)
static gen_helper_gvec_3 * const cadd_fns[4] = {
gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
@@ -7094,21 +7195,21 @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
static gen_helper_gvec_flags_4 * const match_fns[4] = {
gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
};
-TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
+TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
};
-TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
+TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
static gen_helper_gvec_4 * const histcnt_fns[4] = {
NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
};
-TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
- histcnt_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
+ histcnt_fns[a->esz], a, 0)
-TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
- a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
+TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
+ a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
@@ -7120,10 +7221,12 @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
* SVE Integer Multiply-Add (unpredicated)
*/
-TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
- a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
-TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
- a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
+ gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
+ 0, FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
+ gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
+ 0, FPST_FPCR)
static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
NULL, gen_helper_sve2_sqdmlal_zzzw_h,
@@ -7220,20 +7323,21 @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
-TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
- gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
+TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
+ gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
-TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
- gen_helper_crypto_aese, a, false)
-TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
- gen_helper_crypto_aese, a, true)
+TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
+ gen_helper_crypto_aese, a, false)
+TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
+ gen_helper_crypto_aese, a, true)
-TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
- gen_helper_crypto_sm4e, a, 0)
-TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
- gen_helper_crypto_sm4ekey, a, 0)
+TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
+ gen_helper_crypto_sm4e, a, 0)
+TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
+ gen_helper_crypto_sm4ekey, a, 0)
-TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
+TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
+ gen_gvec_rax1, a)
TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
@@ -7284,20 +7388,20 @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)
-TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
- gen_helper_gvec_smmla_b, a, 0)
-TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
- gen_helper_gvec_usmmla_b, a, 0)
-TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
- gen_helper_gvec_ummla_b, a, 0)
+TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_smmla_b, a, 0)
+TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_usmmla_b, a, 0)
+TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_ummla_b, a, 0)
TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
gen_helper_gvec_bfdot, a, 0)
TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
gen_helper_gvec_bfdot_idx, a)
-TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
- gen_helper_gvec_bfmmla, a, 0)
+TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_bfmmla, a, 0)
static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
@@ -7317,3 +7421,162 @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
+
+static bool trans_PSEL(DisasContext *s, arg_psel *a)
+{
+ int vl = vec_full_reg_size(s);
+ int pl = pred_gvec_reg_size(s);
+ int elements = vl >> a->esz;
+ TCGv_i64 tmp, didx, dbit;
+ TCGv_ptr ptr;
+
+ if (!dc_isar_feature(aa64_sme, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ dbit = tcg_temp_new_i64();
+ didx = tcg_temp_new_i64();
+ ptr = tcg_temp_new_ptr();
+
+ /* Compute the predicate element index. */
+ tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
+ if (is_power_of_2(elements)) {
+ tcg_gen_andi_i64(tmp, tmp, elements - 1);
+ } else {
+ tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
+ }
+
+ /* Extract the predicate byte and bit indices. */
+ tcg_gen_shli_i64(tmp, tmp, a->esz);
+ tcg_gen_andi_i64(dbit, tmp, 7);
+ tcg_gen_shri_i64(didx, tmp, 3);
+ if (HOST_BIG_ENDIAN) {
+ tcg_gen_xori_i64(didx, didx, 7);
+ }
+
+ /* Load the predicate byte that contains the element's bit. */
+ tcg_gen_trunc_i64_ptr(ptr, didx);
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
+
+ /* Extract the predicate bit and replicate to MO_64. */
+ tcg_gen_shr_i64(tmp, tmp, dbit);
+ tcg_gen_andi_i64(tmp, tmp, 1);
+ tcg_gen_neg_i64(tmp, tmp);
+
+ /* Apply the mask: either copy the source or write zeros. */
+ tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
+ pred_full_reg_offset(s, a->pn), tmp, pl, pl);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(dbit);
+ tcg_temp_free_i64(didx);
+ tcg_temp_free_ptr(ptr);
+ return true;
+}
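/*
 * A standalone sketch of the index arithmetic above: SVE predicates
 * carry one bit per byte of vector, so an element of (1 << esz) bytes
 * is governed by bit (e << esz) of the predicate register.
 */
static void psel_bit(unsigned e, unsigned esz, unsigned *byte, unsigned *bit)
{
    unsigned t = e << esz;  /* bit offset within the predicate */
    *bit  = t & 7;          /* dbit above */
    *byte = t >> 3;         /* didx above, before the big-endian XOR */
}
/* e.g. esz = MO_16 (1), e = 5: bit offset 10, so byte 1, bit 2. */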
+
+static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+ tcg_gen_smax_i32(d, a, n);
+ tcg_gen_smin_i32(d, d, m);
+}
+
+static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+ tcg_gen_smax_i64(d, a, n);
+ tcg_gen_smin_i64(d, d, m);
+}
+
+static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec a)
+{
+ tcg_gen_smax_vec(vece, d, a, n);
+ tcg_gen_smin_vec(vece, d, d, m);
+}
+
+static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop[] = {
+ INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_sclamp_vec,
+ .fno = gen_helper_gvec_sclamp_b,
+ .opt_opc = vecop,
+ .vece = MO_8 },
+ { .fniv = gen_sclamp_vec,
+ .fno = gen_helper_gvec_sclamp_h,
+ .opt_opc = vecop,
+ .vece = MO_16 },
+ { .fni4 = gen_sclamp_i32,
+ .fniv = gen_sclamp_vec,
+ .fno = gen_helper_gvec_sclamp_s,
+ .opt_opc = vecop,
+ .vece = MO_32 },
+ { .fni8 = gen_sclamp_i64,
+ .fniv = gen_sclamp_vec,
+ .fno = gen_helper_gvec_sclamp_d,
+ .opt_opc = vecop,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
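/*
 * GVecGen4 lets tcg_gen_gvec_4() choose among several expansions:
 * .fniv with .opt_opc when the host provides smin/smax vector ops, the
 * inline scalar .fni4/.fni8 for 32/64-bit elements, and the out-of-line
 * helper .fno otherwise, which is the only option for the 8/16-bit
 * cases above since they define no scalar expansion. Per 32-bit lane
 * the scalar path computes (sketch, MIN/MAX as in qemu/osdep.h):
 */
static int32_t sclamp_lane(int32_t n, int32_t m, int32_t a)
{
    return MIN(MAX(a, n), m);   /* matches gen_sclamp_i32 above */
}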
+
+static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+ tcg_gen_umax_i32(d, a, n);
+ tcg_gen_umin_i32(d, d, m);
+}
+
+static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+ tcg_gen_umax_i64(d, a, n);
+ tcg_gen_umin_i64(d, d, m);
+}
+
+static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec a)
+{
+ tcg_gen_umax_vec(vece, d, a, n);
+ tcg_gen_umin_vec(vece, d, d, m);
+}
+
+static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop[] = {
+ INDEX_op_umin_vec, INDEX_op_umax_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_uclamp_vec,
+ .fno = gen_helper_gvec_uclamp_b,
+ .opt_opc = vecop,
+ .vece = MO_8 },
+ { .fniv = gen_uclamp_vec,
+ .fno = gen_helper_gvec_uclamp_h,
+ .opt_opc = vecop,
+ .vece = MO_16 },
+ { .fni4 = gen_uclamp_i32,
+ .fniv = gen_uclamp_vec,
+ .fno = gen_helper_gvec_uclamp_s,
+ .opt_opc = vecop,
+ .vece = MO_32 },
+ { .fni8 = gen_uclamp_i64,
+ .fniv = gen_uclamp_vec,
+ .fno = gen_helper_gvec_uclamp_d,
+ .opt_opc = vecop,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index 82fdbcae53..bd5ae27d09 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -234,6 +234,18 @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
return false;
}
+ /*
+ * Note that rebuild_hflags_a32 has already accounted for being in EL0
+ * and the higher EL in A64 mode, etc. Unlike A64 mode, there do not
+ * appear to be any VFP-touching insns that remain allowed in
+ * streaming mode.
+ */
+ if (s->sme_trap_nonstreaming) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_smetrap(SME_ET_Streaming,
+ s->base.pc_next - s->pc_curr == 2));
+ return false;
+ }
+
if (!s->vfp_enabled && !ignore_vfp_enabled) {
assert(!arm_dc_feature(s, ARM_FEATURE_M));
unallocated_encoding(s);
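/*
 * The second syn_smetrap() argument reports whether the trapping insn
 * was a 16-bit Thumb encoding, which feeds the syndrome's IL bit; it
 * is derived from the insn length (sketch):
 */
bool is_16bit = (s->base.pc_next - s->pc_curr) == 2;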
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 6617de775f..4ffb095c73 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9378,6 +9378,8 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
}
+ dc->sme_trap_nonstreaming =
+ EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
}
dc->cp_regs = cpu->cp_regs;
dc->features = env->features;
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 22fd882368..af5d4a7086 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -102,6 +102,10 @@ typedef struct DisasContext {
bool pstate_sm;
/* True if PSTATE.ZA is set. */
bool pstate_za;
+ /* True if non-streaming insns should raise an SME Streaming exception. */
+ bool sme_trap_nonstreaming;
+ /* True if the current instruction is non-streaming. */
+ bool is_nonstreaming;
/* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
bool mve_no_pred;
/*
@@ -152,6 +156,11 @@ static inline int plus_2(DisasContext *s, int x)
return x + 2;
}
+static inline int plus_12(DisasContext *s, int x)
+{
+ return x + 12;
+}
+
static inline int times_2(DisasContext *s, int x)
{
return x * 2;
@@ -562,4 +571,11 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
{ return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }
+#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
+ { \
+ s->is_nonstreaming = true; \
+ return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
+ }
+
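/*
 * For reference, a use such as TRANS_FEAT_NONSTREAMING(BEXT,
 * aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, bext_fns[a->esz], a, 0)
 * in translate-sve.c expands to roughly this (sketch):
 *
 *   static bool trans_BEXT(DisasContext *s, arg_BEXT *a)
 *   {
 *       s->is_nonstreaming = true;
 *       return dc_isar_feature(aa64_sve2_bitperm, s)
 *           && gen_gvec_ool_arg_zzz(s, bext_fns[a->esz], a, 0);
 *   }
 *
 * Setting the flag before the feature test is harmless: when the test
 * fails the insn is treated as unallocated and the flag goes unused.
 */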
#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 9a9c034e36..f59d3b26ea 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -2690,3 +2690,27 @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
+
+#define DO_CLAMP(NAME, TYPE) \
+void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE aa = *(TYPE *)(a + i); \
+ TYPE nn = *(TYPE *)(n + i); \
+ TYPE mm = *(TYPE *)(m + i); \
+ TYPE dd = MIN(MAX(aa, nn), mm); \
+ *(TYPE *)(d + i) = dd; \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_CLAMP(gvec_sclamp_b, int8_t)
+DO_CLAMP(gvec_sclamp_h, int16_t)
+DO_CLAMP(gvec_sclamp_s, int32_t)
+DO_CLAMP(gvec_sclamp_d, int64_t)
+
+DO_CLAMP(gvec_uclamp_b, uint8_t)
+DO_CLAMP(gvec_uclamp_h, uint16_t)
+DO_CLAMP(gvec_uclamp_s, uint32_t)
+DO_CLAMP(gvec_uclamp_d, uint64_t)
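/*
 * A self-contained illustration of the per-element clamp semantics
 * above, outside QEMU's vector infrastructure; the names and values
 * here are illustrative only:
 */
#include <stdio.h>

static int sclamp_s(int a, int lo, int hi)
{
    /* d = MIN(MAX(a, n), m): n is the lower bound, m the upper */
    return a < lo ? lo : (a > hi ? hi : a);
}

int main(void)
{
    /* SCLAMP with bounds n = -4, m = 9 */
    printf("%d %d %d\n",
           sclamp_s(-7, -4, 9),  /* -4: raised to the lower bound */
           sclamp_s(3, -4, 9),   /*  3: already in range */
           sclamp_s(42, -4, 9)); /*  9: lowered to the upper bound */
    return 0;
}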