author     Peter Maydell <peter.maydell@linaro.org>  2021-05-25 16:17:06 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2021-05-25 16:17:06 +0100
commit     92f8c6fef13b31ba222c4d20ad8afd2b79c4c28e (patch)
tree       e205a3fc166810e1c27b2b5a614a92c6e975d545
parent     0dab1d36f55c3ed649bb8e4c74b9269ef3a63049 (diff)
parent     f8680aaa6e5bfc6022b75157c23db7d2ea98ab11 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20210525' into staging
target-arm queue:
 * Implement SVE2 emulation
 * Implement integer matrix multiply accumulate
 * Implement FEAT_TLBIOS
 * Implement FEAT_TLBIRANGE
 * disas/libvixl: Protect C system header for C++ compiler
 * Use correct SP in M-profile exception return
 * AN524, AN547: Correct modelling of internal SRAMs
 * hw/intc/arm_gicv3_cpuif: Fix EOIR write access check logic
 * hw/arm/smmuv3: Another range invalidation fix

# gpg: Signature made Tue 25 May 2021 16:02:25 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20210525: (114 commits)
  target/arm: Enable SVE2 and related extensions
  linux-user/aarch64: Enable hwcap bits for sve2 and related extensions
  target/arm: Implement integer matrix multiply accumulate
  target/arm: Implement aarch32 VSUDOT, VUSDOT
  target/arm: Split decode of VSDOT and VUDOT
  target/arm: Split out do_neon_ddda
  target/arm: Fix decode for VDOT (indexed)
  target/arm: Remove unused fpst from VDOT_scalar
  target/arm: Split out do_neon_ddda_fpst
  target/arm: Implement aarch64 SUDOT, USDOT
  target/arm: Implement SVE2 fp multiply-add long
  target/arm: Move endian adjustment macros to vec_internal.h
  target/arm: Implement SVE2 bitwise shift immediate
  target/arm: Implement 128-bit ZIP, UZP, TRN
  target/arm: Implement SVE2 LD1RO
  target/arm: Tidy do_ldrq
  target/arm: Share table of sve load functions
  target/arm: Implement SVE2 FLOGB
  target/arm: Implement SVE2 FCVTXNT, FCVTX
  target/arm: Implement SVE2 FCVTLT
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  accel/tcg/cputlb.c                 231
-rw-r--r--  disas/libvixl/vixl/code-buffer.h     2
-rw-r--r--  disas/libvixl/vixl/globals.h        16
-rw-r--r--  disas/libvixl/vixl/invalset.h        2
-rw-r--r--  disas/libvixl/vixl/platform.h        2
-rw-r--r--  disas/libvixl/vixl/utils.cc          2
-rw-r--r--  disas/libvixl/vixl/utils.h           2
-rw-r--r--  hw/arm/armsse.c                     35
-rw-r--r--  hw/arm/mps2-tz.c                    39
-rw-r--r--  hw/arm/smmuv3.c                     50
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c           48
-rw-r--r--  include/exec/exec-all.h             44
-rw-r--r--  include/hw/arm/armsse.h              2
-rw-r--r--  linux-user/elfload.c                10
-rw-r--r--  target/arm/cpu.c                     2
-rw-r--r--  target/arm/cpu.h                    76
-rw-r--r--  target/arm/cpu64.c                  14
-rw-r--r--  target/arm/cpu_tcg.c                 1
-rw-r--r--  target/arm/helper-sve.h            722
-rw-r--r--  target/arm/helper.c                327
-rw-r--r--  target/arm/helper.h                108
-rw-r--r--  target/arm/kvm64.c                  21
-rw-r--r--  target/arm/m_helper.c                3
-rw-r--r--  target/arm/neon-shared.decode       24
-rw-r--r--  target/arm/neon_helper.c           519
-rw-r--r--  target/arm/sve.decode              574
-rw-r--r--  target/arm/sve_helper.c           2110
-rw-r--r--  target/arm/translate-a64.c         111
-rw-r--r--  target/arm/translate-a64.h           3
-rw-r--r--  target/arm/translate-neon.c        221
-rw-r--r--  target/arm/translate-sve.c        3198
-rw-r--r--  target/arm/vec_helper.c            805
-rw-r--r--  target/arm/vec_internal.h          167
33 files changed, 8233 insertions, 1258 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 84e7d91a5c..2f7088614a 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -707,8 +707,9 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
-static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
- target_ulong page, unsigned bits)
+static void tlb_flush_range_locked(CPUArchState *env, int midx,
+ target_ulong addr, target_ulong len,
+ unsigned bits)
{
CPUTLBDesc *d = &env_tlb(env)->d[midx];
CPUTLBDescFast *f = &env_tlb(env)->f[midx];
@@ -718,20 +719,26 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
* If @bits is smaller than the tlb size, there may be multiple entries
* within the TLB; otherwise all addresses that match under @mask hit
* the same TLB entry.
- *
* TODO: Perhaps allow bits to be a few bits less than the size.
* For now, just flush the entire TLB.
+ *
+ * If @len is larger than the tlb size, then it will take longer to
+ * test all of the entries in the TLB than it will to flush it all.
*/
- if (mask < f->mask) {
+ if (mask < f->mask || len > f->mask) {
tlb_debug("forcing full flush midx %d ("
- TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
- midx, page, mask);
+ TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+ midx, addr, mask, len);
tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
return;
}
- /* Check if we need to flush due to large pages. */
- if ((page & d->large_page_mask) == d->large_page_addr) {
+ /*
+ * Check if we need to flush due to large pages.
+ * Because large_page_mask contains all 1's from the msb,
+ * we only need to test the end of the range.
+ */
+ if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, d->large_page_addr, d->large_page_mask);
@@ -739,85 +746,67 @@ static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
return;
}
- if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
- tlb_n_used_entries_dec(env, midx);
+ for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
+ target_ulong page = addr + i;
+ CPUTLBEntry *entry = tlb_entry(env, midx, page);
+
+ if (tlb_flush_entry_mask_locked(entry, page, mask)) {
+ tlb_n_used_entries_dec(env, midx);
+ }
+ tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}
- tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}
typedef struct {
target_ulong addr;
+ target_ulong len;
uint16_t idxmap;
uint16_t bits;
-} TLBFlushPageBitsByMMUIdxData;
+} TLBFlushRangeData;
-static void
-tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
- TLBFlushPageBitsByMMUIdxData d)
+static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
+ TLBFlushRangeData d)
{
CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
- tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
- d.addr, d.bits, d.idxmap);
+ tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+ d.addr, d.bits, d.len, d.idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if ((d.idxmap >> mmu_idx) & 1) {
- tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
+ tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
}
}
qemu_spin_unlock(&env_tlb(env)->c.lock);
- tb_flush_jmp_cache(cpu, d.addr);
-}
-
-static bool encode_pbm_to_runon(run_on_cpu_data *out,
- TLBFlushPageBitsByMMUIdxData d)
-{
- /* We need 6 bits to hold to hold @bits up to 63. */
- if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
- *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
- return true;
+ for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
+ tb_flush_jmp_cache(cpu, d.addr + i);
}
- return false;
-}
-
-static TLBFlushPageBitsByMMUIdxData
-decode_runon_to_pbm(run_on_cpu_data data)
-{
- target_ulong addr_map_bits = (target_ulong) data.target_ptr;
- return (TLBFlushPageBitsByMMUIdxData){
- .addr = addr_map_bits & TARGET_PAGE_MASK,
- .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
- .bits = addr_map_bits & 0x3f
- };
}
-static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
- run_on_cpu_data runon)
+static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
+ run_on_cpu_data data)
{
- tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
-}
-
-static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
- run_on_cpu_data data)
-{
- TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
- tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
+ TLBFlushRangeData *d = data.host_ptr;
+ tlb_flush_range_by_mmuidx_async_0(cpu, *d);
g_free(d);
}
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
- uint16_t idxmap, unsigned bits)
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits)
{
- TLBFlushPageBitsByMMUIdxData d;
- run_on_cpu_data runon;
+ TLBFlushRangeData d;
- /* If all bits are significant, this devolves to tlb_flush_page. */
- if (bits >= TARGET_LONG_BITS) {
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
return;
}
@@ -829,34 +818,38 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
+ d.len = len;
d.idxmap = idxmap;
d.bits = bits;
if (qemu_cpu_is_self(cpu)) {
- tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
- } else if (encode_pbm_to_runon(&runon, d)) {
- async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
+ tlb_flush_range_by_mmuidx_async_0(cpu, d);
} else {
- TLBFlushPageBitsByMMUIdxData *p
- = g_new(TLBFlushPageBitsByMMUIdxData, 1);
-
/* Otherwise allocate a structure, freed by the worker. */
- *p = d;
- async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+ TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
+ async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
}
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
- target_ulong addr,
- uint16_t idxmap,
- unsigned bits)
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap, unsigned bits)
+{
+ tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
+}
+
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
+ target_ulong addr, target_ulong len,
+ uint16_t idxmap, unsigned bits)
{
- TLBFlushPageBitsByMMUIdxData d;
- run_on_cpu_data runon;
+ TLBFlushRangeData d;
+ CPUState *dst_cpu;
- /* If all bits are significant, this devolves to tlb_flush_page. */
- if (bits >= TARGET_LONG_BITS) {
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
return;
}
@@ -868,40 +861,45 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
+ d.len = len;
d.idxmap = idxmap;
d.bits = bits;
- if (encode_pbm_to_runon(&runon, d)) {
- flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
- } else {
- CPUState *dst_cpu;
- TLBFlushPageBitsByMMUIdxData *p;
-
- /* Allocate a separate data block for each destination cpu. */
- CPU_FOREACH(dst_cpu) {
- if (dst_cpu != src_cpu) {
- p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
- *p = d;
- async_run_on_cpu(dst_cpu,
- tlb_flush_page_bits_by_mmuidx_async_2,
- RUN_ON_CPU_HOST_PTR(p));
- }
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
+ async_run_on_cpu(dst_cpu,
+ tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
}
}
- tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
+ tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
- target_ulong addr,
- uint16_t idxmap,
- unsigned bits)
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap, unsigned bits)
{
- TLBFlushPageBitsByMMUIdxData d;
- run_on_cpu_data runon;
+ tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
+ idxmap, bits);
+}
+
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ TLBFlushRangeData d, *p;
+ CPUState *dst_cpu;
- /* If all bits are significant, this devolves to tlb_flush_page. */
- if (bits >= TARGET_LONG_BITS) {
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
@@ -913,32 +911,31 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
+ d.len = len;
d.idxmap = idxmap;
d.bits = bits;
- if (encode_pbm_to_runon(&runon, d)) {
- flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
- async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
- runon);
- } else {
- CPUState *dst_cpu;
- TLBFlushPageBitsByMMUIdxData *p;
-
- /* Allocate a separate data block for each destination cpu. */
- CPU_FOREACH(dst_cpu) {
- if (dst_cpu != src_cpu) {
- p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
- *p = d;
- async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
- RUN_ON_CPU_HOST_PTR(p));
- }
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ p = g_memdup(&d, sizeof(d));
+ async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
}
-
- p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
- *p = d;
- async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
- RUN_ON_CPU_HOST_PTR(p));
}
+
+ p = g_memdup(&d, sizeof(d));
+ async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+}
+
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+ idxmap, bits);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
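
[Editor's note] The net effect of this cputlb.c change: a range flush now walks the range page by page, except when the range covers more pages than the TLB has entries, in which case a full flush is cheaper. Below is a minimal, self-contained C sketch of that strategy; the types, names, and printfs are toy stand-ins, not QEMU's.

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 0x1000u

/* table_mask covers (n_entries * page_size) - 1, like CPUTLBDescFast::mask */
static void toy_flush_range(uint32_t table_mask, uint32_t addr, uint32_t len)
{
    /*
     * A range with more pages than the table has entries would make us
     * test every entry anyway, so rebuilding the table is cheaper.
     */
    if (len > table_mask) {
        printf("full flush\n");
        return;
    }
    for (uint32_t i = 0; i < len; i += TOY_PAGE_SIZE) {
        printf("flush page 0x%08x\n", addr + i);
    }
}

int main(void)
{
    uint32_t mask = 0xffff;                                 /* 16 entries of 4 KiB */
    toy_flush_range(mask, 0x20000000u, 3 * TOY_PAGE_SIZE);  /* 3 single-page flushes */
    toy_flush_range(mask, 0x20000000u, 0x100000u);          /* too long: full flush */
    return 0;
}
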
diff --git a/disas/libvixl/vixl/code-buffer.h b/disas/libvixl/vixl/code-buffer.h
index f93ebb6b82..b95babbdee 100644
--- a/disas/libvixl/vixl/code-buffer.h
+++ b/disas/libvixl/vixl/code-buffer.h
@@ -27,7 +27,7 @@
#ifndef VIXL_CODE_BUFFER_H
#define VIXL_CODE_BUFFER_H
-#include <string.h>
+#include <cstring>
#include "vixl/globals.h"
namespace vixl {
diff --git a/disas/libvixl/vixl/globals.h b/disas/libvixl/vixl/globals.h
index 7099aa599f..3a71942f1e 100644
--- a/disas/libvixl/vixl/globals.h
+++ b/disas/libvixl/vixl/globals.h
@@ -40,15 +40,17 @@
#define __STDC_FORMAT_MACROS
#endif
-#include <stdint.h>
+extern "C" {
#include <inttypes.h>
-
-#include <assert.h>
-#include <stdarg.h>
-#include <stdio.h>
#include <stdint.h>
-#include <stdlib.h>
-#include <stddef.h>
+}
+
+#include <cassert>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
#include "vixl/platform.h"
diff --git a/disas/libvixl/vixl/invalset.h b/disas/libvixl/vixl/invalset.h
index ffdc0237b4..2e0871f8c3 100644
--- a/disas/libvixl/vixl/invalset.h
+++ b/disas/libvixl/vixl/invalset.h
@@ -27,7 +27,7 @@
#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_
-#include <string.h>
+#include <cstring>
#include <algorithm>
#include <vector>
diff --git a/disas/libvixl/vixl/platform.h b/disas/libvixl/vixl/platform.h
index ab588f07f5..26a74de81b 100644
--- a/disas/libvixl/vixl/platform.h
+++ b/disas/libvixl/vixl/platform.h
@@ -28,7 +28,9 @@
#define PLATFORM_H
// Define platform specific functionalities.
+extern "C" {
#include <signal.h>
+}
namespace vixl {
inline void HostBreakpoint() { raise(SIGINT); }
diff --git a/disas/libvixl/vixl/utils.cc b/disas/libvixl/vixl/utils.cc
index 3b8bd75fba..69304d266d 100644
--- a/disas/libvixl/vixl/utils.cc
+++ b/disas/libvixl/vixl/utils.cc
@@ -25,7 +25,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "vixl/utils.h"
-#include <stdio.h>
+#include <cstdio>
namespace vixl {
diff --git a/disas/libvixl/vixl/utils.h b/disas/libvixl/vixl/utils.h
index 5ab134e240..ecb0f1014a 100644
--- a/disas/libvixl/vixl/utils.h
+++ b/disas/libvixl/vixl/utils.h
@@ -27,8 +27,8 @@
#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H
-#include <string.h>
#include <cmath>
+#include <cstring>
#include "vixl/globals.h"
#include "vixl/compiler-intrinsics.h"
diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c
index 2e5d0679e7..a1456cb0f4 100644
--- a/hw/arm/armsse.c
+++ b/hw/arm/armsse.c
@@ -13,6 +13,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/bitops.h"
+#include "qemu/units.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
@@ -59,6 +60,7 @@ struct ARMSSEInfo {
const char *cpu_type;
uint32_t sse_version;
int sram_banks;
+ uint32_t sram_bank_base;
int num_cpus;
uint32_t sys_version;
uint32_t iidr;
@@ -69,6 +71,7 @@ struct ARMSSEInfo {
bool has_cpuid;
bool has_cpu_pwrctrl;
bool has_sse_counter;
+ bool has_tcms;
Property *props;
const ARMSSEDeviceInfo *devinfo;
const bool *irq_is_common;
@@ -102,7 +105,7 @@ static Property sse300_properties[] = {
DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64),
- DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15),
+ DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 18),
DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000),
DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true),
DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true),
@@ -504,6 +507,7 @@ static const ARMSSEInfo armsse_variants[] = {
.sse_version = ARMSSE_IOTKIT,
.cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"),
.sram_banks = 1,
+ .sram_bank_base = 0x20000000,
.num_cpus = 1,
.sys_version = 0x41743,
.iidr = 0,
@@ -514,6 +518,7 @@ static const ARMSSEInfo armsse_variants[] = {
.has_cpuid = false,
.has_cpu_pwrctrl = false,
.has_sse_counter = false,
+ .has_tcms = false,
.props = iotkit_properties,
.devinfo = iotkit_devices,
.irq_is_common = sse200_irq_is_common,
@@ -523,6 +528,7 @@ static const ARMSSEInfo armsse_variants[] = {
.sse_version = ARMSSE_SSE200,
.cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"),
.sram_banks = 4,
+ .sram_bank_base = 0x20000000,
.num_cpus = 2,
.sys_version = 0x22041743,
.iidr = 0,
@@ -533,6 +539,7 @@ static const ARMSSEInfo armsse_variants[] = {
.has_cpuid = true,
.has_cpu_pwrctrl = false,
.has_sse_counter = false,
+ .has_tcms = false,
.props = sse200_properties,
.devinfo = sse200_devices,
.irq_is_common = sse200_irq_is_common,
@@ -542,6 +549,7 @@ static const ARMSSEInfo armsse_variants[] = {
.sse_version = ARMSSE_SSE300,
.cpu_type = ARM_CPU_TYPE_NAME("cortex-m55"),
.sram_banks = 2,
+ .sram_bank_base = 0x21000000,
.num_cpus = 1,
.sys_version = 0x7e00043b,
.iidr = 0x74a0043b,
@@ -552,6 +560,7 @@ static const ARMSSEInfo armsse_variants[] = {
.has_cpuid = true,
.has_cpu_pwrctrl = true,
.has_sse_counter = true,
+ .has_tcms = true,
.props = sse300_properties,
.devinfo = sse300_devices,
.irq_is_common = sse300_irq_is_common,
@@ -909,7 +918,6 @@ static void armsse_realize(DeviceState *dev, Error **errp)
const ARMSSEDeviceInfo *devinfo;
int i;
MemoryRegion *mr;
- Error *err = NULL;
SysBusDevice *sbd_apb_ppc0;
SysBusDevice *sbd_secctl;
DeviceState *dev_apb_ppc0;
@@ -918,6 +926,8 @@ static void armsse_realize(DeviceState *dev, Error **errp)
DeviceState *dev_splitter;
uint32_t addr_width_max;
+ ERRP_GUARD();
+
if (!s->board_memory) {
error_setg(errp, "memory property was not set");
return;
@@ -1147,10 +1157,9 @@ static void armsse_realize(DeviceState *dev, Error **errp)
uint32_t sram_bank_size = 1 << s->sram_addr_width;
memory_region_init_ram(&s->sram[i], NULL, ramname,
- sram_bank_size, &err);
+ sram_bank_size, errp);
g_free(ramname);
- if (err) {
- error_propagate(errp, err);
+ if (*errp) {
return;
}
object_property_set_link(OBJECT(&s->mpc[i]), "downstream",
@@ -1161,7 +1170,7 @@ static void armsse_realize(DeviceState *dev, Error **errp)
/* Map the upstream end of the MPC into the right place... */
sbd_mpc = SYS_BUS_DEVICE(&s->mpc[i]);
memory_region_add_subregion(&s->container,
- 0x20000000 + i * sram_bank_size,
+ info->sram_bank_base + i * sram_bank_size,
sysbus_mmio_get_region(sbd_mpc, 1));
/* ...and its register interface */
memory_region_add_subregion(&s->container, 0x50083000 + i * 0x1000,
@@ -1210,6 +1219,20 @@ static void armsse_realize(DeviceState *dev, Error **errp)
sysbus_mmio_get_region(sbd, 1));
}
+ if (info->has_tcms) {
+ /* The SSE-300 has an ITCM at 0x0000_0000 and a DTCM at 0x2000_0000 */
+ memory_region_init_ram(&s->itcm, NULL, "sse300-itcm", 512 * KiB, errp);
+ if (*errp) {
+ return;
+ }
+ memory_region_init_ram(&s->dtcm, NULL, "sse300-dtcm", 512 * KiB, errp);
+ if (*errp) {
+ return;
+ }
+ memory_region_add_subregion(&s->container, 0x00000000, &s->itcm);
+ memory_region_add_subregion(&s->container, 0x20000000, &s->dtcm);
+ }
+
/* Devices behind APB PPC0:
* 0x40000000: timer0
* 0x40001000: timer1
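
[Editor's note] Two distinct changes land in armsse.c above: the SSE-300 gains ITCM/DTCM RAM models, and armsse_realize() switches from a local Error * plus error_propagate() to passing the caller's errp straight through, guarded by ERRP_GUARD(). A toy sketch of that error-handling pattern, assuming stand-in Error helpers rather than QEMU's qapi/error.h (ERRP_GUARD()'s real job, making *errp dereferenceable even when the caller passed NULL, is reduced to a comment here):

#include <stdio.h>
#include <stdlib.h>

typedef struct Error { char msg[64]; } Error;

static void error_setg_toy(Error **errp, const char *msg)
{
    if (errp) {
        *errp = malloc(sizeof(Error));
        snprintf((*errp)->msg, sizeof((*errp)->msg), "%s", msg);
    }
}

static void init_ram_toy(const char *name, size_t size, Error **errp)
{
    if (size == 0) {
        error_setg_toy(errp, "zero-sized RAM");
    }
}

/* Mirrors armsse_realize(): bail out as soon as *errp is set. */
static void realize_toy(size_t itcm_size, size_t dtcm_size, Error **errp)
{
    /* ERRP_GUARD() would go here so that *errp is always dereferenceable */
    init_ram_toy("itcm", itcm_size, errp);
    if (*errp) {
        return;
    }
    init_ram_toy("dtcm", dtcm_size, errp);
    if (*errp) {
        return;
    }
}

int main(void)
{
    Error *err = NULL;
    realize_toy(512 * 1024, 0, &err);
    printf("%s\n", err ? err->msg : "realized OK");
    free(err);
    return 0;
}
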
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index 70aa31a7f6..e23830f4b7 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -123,8 +123,10 @@ struct MPS2TZMachineClass {
int numirq; /* Number of external interrupts */
int uart_overflow_irq; /* number of the combined UART overflow IRQ */
uint32_t init_svtor; /* init-svtor setting for SSE */
+ uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */
const RAMInfo *raminfo;
const char *armsse_type;
+ uint32_t boot_ram_size; /* size of ram at address 0; 0 == find in raminfo */
};
struct MPS2TZMachineState {
@@ -244,18 +246,12 @@ static const RAMInfo an524_raminfo[] = { {
.mpc = 0,
.mrindex = 0,
}, {
- .name = "sram",
- .base = 0x20000000,
- .size = 32 * 4 * KiB,
- .mpc = -1,
- .mrindex = 1,
- }, {
/* We don't model QSPI flash yet; for now expose it as simple ROM */
.name = "QSPI",
.base = 0x28000000,
.size = 8 * MiB,
.mpc = 1,
- .mrindex = 2,
+ .mrindex = 1,
.flags = IS_ROM,
}, {
.name = "DDR",
@@ -269,24 +265,12 @@ static const RAMInfo an524_raminfo[] = { {
};
static const RAMInfo an547_raminfo[] = { {
- .name = "itcm",
- .base = 0x00000000,
- .size = 512 * KiB,
- .mpc = -1,
- .mrindex = 0,
- }, {
.name = "sram",
.base = 0x01000000,
.size = 2 * MiB,
.mpc = 0,
.mrindex = 1,
}, {
- .name = "dtcm",
- .base = 0x20000000,
- .size = 4 * 128 * KiB,
- .mpc = -1,
- .mrindex = 2,
- }, {
.name = "sram 2",
.base = 0x21000000,
.size = 4 * MiB,
@@ -766,6 +750,14 @@ static uint32_t boot_ram_size(MPS2TZMachineState *mms)
const RAMInfo *p;
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
+ /*
+ * Use a per-board specification (for when the boot RAM is in
+ * the SSE and so doesn't have a RAMInfo list entry)
+ */
+ if (mmc->boot_ram_size) {
+ return mmc->boot_ram_size;
+ }
+
for (p = mmc->raminfo; p->name; p++) {
if (p->base == boot_mem_base(mms)) {
return p->size;
@@ -812,6 +804,7 @@ static void mps2tz_common_init(MachineState *machine)
OBJECT(system_memory), &error_abort);
qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq);
qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor);
+ qdev_prop_set_uint32(iotkitdev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width);
qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk);
qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk);
sysbus_realize(SYS_BUS_DEVICE(&mms->iotkit), &error_fatal);
@@ -1269,8 +1262,10 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data)
mmc->numirq = 92;
mmc->uart_overflow_irq = 47;
mmc->init_svtor = 0x10000000;
+ mmc->sram_addr_width = 15;
mmc->raminfo = an505_raminfo;
mmc->armsse_type = TYPE_IOTKIT;
+ mmc->boot_ram_size = 0;
mps2tz_set_default_ram_info(mmc);
}
@@ -1296,8 +1291,10 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data)
mmc->numirq = 92;
mmc->uart_overflow_irq = 47;
mmc->init_svtor = 0x10000000;
+ mmc->sram_addr_width = 15;
mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */
mmc->armsse_type = TYPE_SSE200;
+ mmc->boot_ram_size = 0;
mps2tz_set_default_ram_info(mmc);
}
@@ -1323,8 +1320,10 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data)
mmc->numirq = 95;
mmc->uart_overflow_irq = 47;
mmc->init_svtor = 0x10000000;
+ mmc->sram_addr_width = 15;
mmc->raminfo = an524_raminfo;
mmc->armsse_type = TYPE_SSE200;
+ mmc->boot_ram_size = 0;
mps2tz_set_default_ram_info(mmc);
object_class_property_add_str(oc, "remap", mps2_get_remap, mps2_set_remap);
@@ -1355,8 +1354,10 @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data)
mmc->numirq = 96;
mmc->uart_overflow_irq = 48;
mmc->init_svtor = 0x00000000;
+ mmc->sram_addr_width = 21;
mmc->raminfo = an547_raminfo;
mmc->armsse_type = TYPE_SSE300;
+ mmc->boot_ram_size = 512 * KiB;
mps2tz_set_default_ram_info(mmc);
}
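
[Editor's note] The mps2-tz.c change above moves the AN547's ITCM/DTCM out of the board's RAMInfo list (they are now modelled inside the SSE) and adds a per-board boot_ram_size override for exactly that case. A trimmed sketch of the resulting lookup, with hypothetical stand-in structures:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    const char *name;   /* NULL name terminates the list */
    uint32_t base;
    uint32_t size;
} ToyRAMInfo;

static uint32_t toy_boot_ram_size(uint32_t override, uint32_t boot_base,
                                  const ToyRAMInfo *raminfo)
{
    if (override) {
        return override;            /* boot RAM lives inside the SSE */
    }
    for (const ToyRAMInfo *p = raminfo; p->name; p++) {
        if (p->base == boot_base) {
            return p->size;
        }
    }
    return 0;                       /* not found; the real code asserts */
}

int main(void)
{
    static const ToyRAMInfo an547[] = {
        { "sram",   0x01000000, 2 * 1024 * 1024 },
        { "sram 2", 0x21000000, 4 * 1024 * 1024 },
        { NULL, 0, 0 },
    };
    /* AN547: the boot RAM (ITCM) is modelled by the SSE, so the override wins */
    printf("0x%" PRIx32 "\n", toy_boot_ram_size(512 * 1024, 0, an547));
    return 0;
}
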
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 7bed2ac520..01b60bee49 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -857,43 +857,45 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
{
- uint8_t scale = 0, num = 0, ttl = 0;
- dma_addr_t addr = CMD_ADDR(cmd);
+ dma_addr_t end, addr = CMD_ADDR(cmd);
uint8_t type = CMD_TYPE(cmd);
uint16_t vmid = CMD_VMID(cmd);
+ uint8_t scale = CMD_SCALE(cmd);
+ uint8_t num = CMD_NUM(cmd);
+ uint8_t ttl = CMD_TTL(cmd);
bool leaf = CMD_LEAF(cmd);
uint8_t tg = CMD_TG(cmd);
- uint64_t first_page = 0, last_page;
- uint64_t num_pages = 1;
+ uint64_t num_pages;
+ uint8_t granule;
int asid = -1;
- if (tg) {
- scale = CMD_SCALE(cmd);
- num = CMD_NUM(cmd);
- ttl = CMD_TTL(cmd);
- num_pages = (num + 1) * BIT_ULL(scale);
- }
-
if (type == SMMU_CMD_TLBI_NH_VA) {
asid = CMD_ASID(cmd);
}
- /* Split invalidations into ^2 range invalidations */
- last_page = num_pages - 1;
- while (num_pages) {
- uint8_t granule = tg * 2 + 10;
- uint64_t mask, count;
+ if (!tg) {
+ trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
+ smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
+ smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
+ return;
+ }
+
+ /* RIL in use */
- mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
- count = mask + 1;
+ num_pages = (num + 1) * BIT_ULL(scale);
+ granule = tg * 2 + 10;
+
+ /* Split invalidations into ^2 range invalidations */
+ end = addr + (num_pages << granule) - 1;
- trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
- smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
- smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
+ while (addr != end + 1) {
+ uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
- num_pages -= count;
- first_page += count;
- addr += count * BIT_ULL(granule);
+ num_pages = (mask + 1) >> granule;
+ trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+ smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+ smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+ addr += mask + 1;
}
}
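
[Editor's note] The smmuv3.c fix above recomputes the power-of-two mask from the current address on every iteration, so an arbitrary [addr, end] range is carved into naturally aligned power-of-two blocks. That is the job QEMU's dma_aligned_pow2_mask() does for the real code; the standalone reimplementation below is illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Largest (2^k - 1) such that addr is 2^k-aligned and addr + 2^k - 1 <= end. */
static uint64_t aligned_pow2_mask(uint64_t addr, uint64_t end)
{
    uint64_t align_mask = addr ? (addr & -addr) - 1 : UINT64_MAX;
    uint64_t size = end - addr + 1;          /* assumes end >= addr */
    uint64_t size_mask = UINT64_MAX;

    if (size != 0) {                         /* size == 0 means 2^64 bytes */
        while (size & (size - 1)) {
            size &= size - 1;                /* round down to a power of two */
        }
        size_mask = size - 1;
    }
    return align_mask < size_mask ? align_mask : size_mask;
}

int main(void)
{
    const unsigned granule = 12;             /* 4 KiB pages */
    uint64_t addr = 0x1000, end = addr + (5ull << granule) - 1;  /* 5 pages */

    /* Mirrors the loop in smmuv3_s1_range_inval(): 5 pages -> 1 + 2 + 2 */
    while (addr != end + 1) {
        uint64_t mask = aligned_pow2_mask(addr, end);
        printf("invalidate 0x%" PRIx64 " (%" PRIu64 " pages)\n",
               addr, (mask + 1) >> granule);
        addr += mask + 1;
    }
    return 0;
}
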
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 43ef1d7a84..81f94c7f4a 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -1307,27 +1307,16 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int irq = value & 0xffffff;
int grp;
+ bool is_eoir0 = ri->crm == 8;
- if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
+ if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
icv_eoir_write(env, ri, value);
return;
}
- trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
+ trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
gicv3_redist_affid(cs), value);
- if (ri->crm == 8) {
- /* EOIR0 */
- grp = GICV3_G0;
- } else {
- /* EOIR1 */
- if (arm_is_secure(env)) {
- grp = GICV3_G1;
- } else {
- grp = GICV3_G1NS;
- }
- }
-
if (irq >= cs->gic->num_irq) {
/* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
@@ -1340,8 +1329,35 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
- if (icc_highest_active_group(cs) != grp) {
- return;
+ grp = icc_highest_active_group(cs);
+ switch (grp) {
+ case GICV3_G0:
+ if (!is_eoir0) {
+ return;
+ }
+ if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
+ && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
+ return;
+ }
+ break;
+ case GICV3_G1:
+ if (is_eoir0) {
+ return;
+ }
+ if (!arm_is_secure(env)) {
+ return;
+ }
+ break;
+ case GICV3_G1NS:
+ if (is_eoir0) {
+ return;
+ }
+ if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
icc_drop_prio(cs, grp);
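
[Editor's note] The rewritten check above keys off the *highest active* group rather than the group implied by the register written, and only then asks whether this write is allowed to complete it. A boiled-down decision function mirroring that switch; the enum and parameters are simplified stand-ins for the GICv3 model's state, not its API:

#include <stdbool.h>
#include <stdio.h>

typedef enum { G0, G1, G1NS } Group;

static bool eoi_allowed(bool is_eoir0, Group highest_active,
                        bool secure, bool el3_or_mon, bool have_el3, bool ds)
{
    switch (highest_active) {
    case G0:
        /* Only EOIR0 completes Group 0; NS writes also need GICD_CTLR.DS */
        return is_eoir0 && (ds || !have_el3 || secure);
    case G1:
        /* Secure Group 1 completes only via a Secure EOIR1 write */
        return !is_eoir0 && secure;
    case G1NS:
        /* NS Group 1; a Secure non-EL3/Monitor EOIR1 write is ignored */
        return !is_eoir0 && (el3_or_mon || !secure);
    }
    return false;
}

int main(void)
{
    /* NS EL1 writes EOIR1 while a G1NS interrupt is active: honoured */
    printf("%d\n", eoi_allowed(false, G1NS, false, false, true, false));
    /* NS EL1 writes EOIR0 while a G0 interrupt is active: ignored */
    printf("%d\n", eoi_allowed(true, G0, false, false, true, false));
    return 0;
}
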
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 6b036cae8f..8021adf38f 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -263,6 +263,31 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
(CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits);
+
+/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
* @vaddr: virtual address of page to add entry for
@@ -365,6 +390,25 @@ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
uint16_t idxmap, unsigned bits)
{
}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
#endif
/**
* probe_access:
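
[Editor's note] A usage sketch for the new exec-all.h API: an AArch64 target decoding a FEAT_TLBIRANGE payload (TG/SCALE/NUM fields) into a byte length and handing it to tlb_flush_range_by_mmuidx(). The decode follows the architectural formula pages = (NUM + 1) * 2^(5*SCALE + 1); the local extract64() and the stubbed flush call stand in for QEMU's.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

static void tlb_flush_range_stub(uint64_t addr, uint64_t len,
                                 uint16_t idxmap, unsigned bits)
{
    printf("flush [0x%" PRIx64 ", 0x%" PRIx64 ") idxmap=0x%x bits=%u\n",
           addr, addr + len, idxmap, bits);
}

static void do_tlbi_range(uint64_t value)
{
    uint64_t num = extract64(value, 39, 5);
    uint64_t scale = extract64(value, 44, 2);
    uint64_t tg = extract64(value, 46, 2);        /* 1=4K, 2=16K, 3=64K */
    unsigned page_shift = (tg - 1) * 2 + 12;
    uint64_t length = (num + 1) << ((5 * scale) + 1 + page_shift);
    uint64_t addr = extract64(value, 0, 37) << page_shift;

    tlb_flush_range_stub(addr, length, 0x1, 64);
}

int main(void)
{
    /* TG=4K, SCALE=0, NUM=3, BaseADDR=0x40000 pages: 8 pages flushed */
    uint64_t value = (1ULL << 46) | (0ULL << 44) | (3ULL << 39) | 0x40000;
    do_tlbi_range(value);
    return 0;
}
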
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
index 36592be62c..9648e7a419 100644
--- a/include/hw/arm/armsse.h
+++ b/include/hw/arm/armsse.h
@@ -198,6 +198,8 @@ struct ARMSSE {
MemoryRegion alias2;
MemoryRegion alias3[SSE_MAX_CPUS];
MemoryRegion sram[MAX_SRAM_BANKS];
+ MemoryRegion itcm;
+ MemoryRegion dtcm;
qemu_irq *exp_irqs[SSE_MAX_CPUS];
qemu_irq ppc0_irq;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 0e832b2649..1ab97e38e0 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -648,8 +648,18 @@ static uint32_t get_elf_hwcap2(void)
uint32_t hwcaps = 0;
GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
+ GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
+ GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
+ GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
+ GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
+ GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
+ GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
+ GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
+ GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
+ GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
+ GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
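
[Editor's note] A userspace counterpart to the elfload.c hunk above: on AArch64 Linux, a guest can read these bits back with getauxval(AT_HWCAP2). HWCAP2_SVE2 normally comes from the kernel's <asm/hwcap.h>; the fallback definition below is only for building against older headers and assumes the aarch64 ABI value.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_SVE2
#define HWCAP2_SVE2 (1 << 1)   /* value per the aarch64 kernel ABI */
#endif

int main(void)
{
    unsigned long hwcap2 = getauxval(AT_HWCAP2);
    printf("SVE2: %s\n", (hwcap2 & HWCAP2_SVE2) ? "yes" : "no");
    return 0;
}
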
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 4eb0d2f85c..7aeb4b1381 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1503,6 +1503,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
t = cpu->isar.id_aa64isar1;
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
+ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
cpu->isar.id_aa64isar1 = t;
t = cpu->isar.id_aa64pfr0;
@@ -1517,6 +1518,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
u = cpu->isar.id_isar6;
u = FIELD_DP32(u, ID_ISAR6, DP, 0);
u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
+ u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
cpu->isar.id_isar6 = u;
if (!arm_feature(env, ARM_FEATURE_M)) {
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 616b393253..04f8be35bf 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -947,6 +947,7 @@ struct ARMCPU {
uint64_t id_aa64mmfr2;
uint64_t id_aa64dfr0;
uint64_t id_aa64dfr1;
+ uint64_t id_aa64zfr0;
} isar;
uint64_t midr;
uint32_t revidr;
@@ -2034,6 +2035,16 @@ FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
FIELD(ID_AA64DFR0, MTPMU, 48, 4)
+FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
+FIELD(ID_AA64ZFR0, AES, 4, 4)
+FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
+FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
+FIELD(ID_AA64ZFR0, SHA3, 32, 4)
+FIELD(ID_AA64ZFR0, SM4, 40, 4)
+FIELD(ID_AA64ZFR0, I8MM, 44, 4)
+FIELD(ID_AA64ZFR0, F32MM, 52, 4)
+FIELD(ID_AA64ZFR0, F64MM, 56, 4)
+
FIELD(ID_DFR0, COPDBG, 0, 4)
FIELD(ID_DFR0, COPSDBG, 4, 4)
FIELD(ID_DFR0, MMAPDBG, 8, 4)
@@ -3772,6 +3783,11 @@ static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
}
+static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
+}
+
static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
@@ -4071,6 +4087,16 @@ static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
}
+static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+}
+
+static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
+}
+
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
@@ -4195,6 +4221,11 @@ static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2;
}
+static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
+}
+
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
@@ -4215,6 +4246,51 @@ static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
}
+static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
+}
+
+static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
+}
+
/*
* Feature tests for "does this exist in either 32-bit or 64-bit?"
*/
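
[Editor's note] The new ID_AA64ZFR0 fields and feature tests above all use the registerfields pattern: FIELD() records a field's shift and length as enum constants, and FIELD_EX64()/FIELD_DP64() extract or deposit it. A trimmed, self-contained reimplementation for illustration, not QEMU's hw/registerfields.h:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD(reg, field, shift, length) \
    enum { R_##reg##_##field##_SHIFT = (shift), \
           R_##reg##_##field##_LENGTH = (length) };

#define FIELD_EX64(val, reg, field) \
    (((val) >> R_##reg##_##field##_SHIFT) & \
     ((1ULL << R_##reg##_##field##_LENGTH) - 1))

#define FIELD_DP64(val, reg, field, fv) \
    (((val) & ~(((1ULL << R_##reg##_##field##_LENGTH) - 1) \
                << R_##reg##_##field##_SHIFT)) | \
     (((uint64_t)(fv)) << R_##reg##_##field##_SHIFT))

FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
FIELD(ID_AA64ZFR0, AES, 4, 4)

int main(void)
{
    uint64_t t = 0;
    t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);  /* SVE2 present */
    t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2);     /* AES + 128-bit PMULL */
    printf("zfr0 = 0x%" PRIx64 "\n", t);
    printf("sve2?     %d\n", FIELD_EX64(t, ID_AA64ZFR0, SVEVER) != 0);
    printf("pmull128? %d\n", FIELD_EX64(t, ID_AA64ZFR0, AES) >= 2);
    return 0;
}
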
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index f0a9e968c9..d561dc7acc 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -651,6 +651,7 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
+ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
cpu->isar.id_aa64isar0 = t;
@@ -662,6 +663,7 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
+ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
cpu->isar.id_aa64isar1 = t;
t = cpu->isar.id_aa64pfr0;
@@ -702,6 +704,17 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
cpu->isar.id_aa64mmfr2 = t;
+ t = cpu->isar.id_aa64zfr0;
+ t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */
+ t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
+ cpu->isar.id_aa64zfr0 = t;
+
/* Replicate the same data to the 32-bit id registers. */
u = cpu->isar.id_isar5;
u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
@@ -718,6 +731,7 @@ static void aarch64_max_initfn(Object *obj)
u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
u = FIELD_DP32(u, ID_ISAR6, SB, 1);
u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
+ u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
cpu->isar.id_isar6 = u;
u = cpu->isar.id_pfr0;
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index 046e476f65..d3458335ed 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -968,6 +968,7 @@ static void arm_max_initfn(Object *obj)
t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
t = FIELD_DP32(t, ID_ISAR6, SB, 1);
t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
cpu->isar.id_isar6 = t;
t = cpu->isar.mvfr1;
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index e4cadd2a65..29a14a21f5 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -158,6 +158,128 @@ DEF_HELPER_FLAGS_5(sve_umulh_zpzz_s, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(sve_umulh_zpzz_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG,
@@ -204,6 +326,105 @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG,
@@ -440,6 +661,16 @@ DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_tbx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_tbx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_tbx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_tbx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
@@ -458,16 +689,19 @@ DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_zip_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uzp_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_trn_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_compact_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_compact_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -488,6 +722,19 @@ DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqneg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG,
@@ -679,7 +926,8 @@ DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(sve_while, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+DEF_HELPER_FLAGS_3(sve_whilel, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+DEF_HELPER_FLAGS_3(sve_whileg, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
@@ -1133,6 +1381,46 @@ DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_ssubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uaddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_usubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_saddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_ssubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uaddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_usubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -2073,4 +2361,436 @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG,
DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sshll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_eoril_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bext_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bdep_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bgrp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_cadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqcadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sabal_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sabal_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sabal_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uabal_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uabal_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_shrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_shrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_rshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_rshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_addhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_addhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_raddhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_raddhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_subhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_subhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_histcnt_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_xar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_eor3, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bsl1n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bsl2n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_nbsl, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlal_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_smull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cmla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cdot_zzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cdot_zzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cdot_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_fcvtlt_hs, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_fcvtlt_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(flogb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(flogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(flogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqshlu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 3b365a78cb..a66c1f0b9e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4759,6 +4759,172 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMMMUIdxBit_SE3, bits);
}
+#ifdef TARGET_AARCH64
+static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
+ uint64_t value)
+{
+ unsigned int page_shift;
+ unsigned int page_size_granule;
+ uint64_t num;
+ uint64_t scale;
+ uint64_t exponent;
+ uint64_t length;
+
+ num = extract64(value, 39, 4);
+ scale = extract64(value, 44, 2);
+ page_size_granule = extract64(value, 46, 2);
+
+    if (page_size_granule == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %u\n",
+                      page_size_granule);
+        return 0;
+    }
+
+    /* TG encoding: 0b01 = 4KB, 0b10 = 16KB, 0b11 = 64KB granule. */
+    page_shift = (page_size_granule - 1) * 2 + 12;
+
+ exponent = (5 * scale) + 1;
+ length = (num + 1) << (exponent + page_shift);
+
+ return length;
+}
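
For reference, once the fields are extracted the length computation reduces to a single shift. A minimal sketch (range_length is a hypothetical stand-alone helper, not part of the patch):

#include <stdint.h>

/* Same arithmetic as tlbi_aa64_range_get_length, minus the extraction. */
static uint64_t range_length(uint64_t num, uint64_t scale,
                             unsigned int page_shift)
{
    uint64_t exponent = (5 * scale) + 1;
    return (num + 1) << (exponent + page_shift);
}

/*
 * Example: NUM = 3, SCALE = 1, 4KB granule (TG = 0b01, page_shift = 12):
 * range_length(3, 1, 12) == 0x100000, i.e. (3 + 1) * 2^6 = 256 pages.
 */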
+
+static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
+ bool two_ranges)
+{
+ /* TODO: ARMv8.7 FEAT_LPA2 */
+ uint64_t pageaddr;
+
+ if (two_ranges) {
+ pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+ } else {
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ }
+
+ return pageaddr;
+}
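
The extract64/sextract64 distinction above is what makes TTBR1-side addresses come out correctly: in a two-range regime, bit 36 of BaseADDR is a sign bit. A sketch with local stand-ins for QEMU's bitops (assumed to match the bitops.h semantics; relies on arithmetic right shift of signed values, as QEMU itself does):

#include <stdint.h>

static uint64_t extract_bits(uint64_t v, int start, int len)
{
    return (v >> start) & (~0ULL >> (64 - len));
}

static int64_t sextract_bits(uint64_t v, int start, int len)
{
    /* Move the field to the top, then arithmetic-shift back down. */
    return (int64_t)(v << (64 - len - start)) >> (64 - len);
}

/*
 * With the 37-bit BaseADDR field all ones and TARGET_PAGE_BITS == 12:
 *   two_ranges:  sextract_bits(v, 0, 37) == -1, so the base becomes
 *                0xfffffffffffff000, a TTBR1-side virtual address;
 *   !two_ranges: extract_bits(v, 0, 37) leaves it zero-extended.
 */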
+
+static void do_rvae_write(CPUARMState *env, uint64_t value,
+ int idxmap, bool synced)
+{
+ ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
+ bool two_ranges = regime_has_2_ranges(one_idx);
+ uint64_t baseaddr, length;
+ int bits;
+
+ baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
+ length = tlbi_aa64_range_get_length(env, value);
+ bits = tlbbits_for_regime(env, one_idx, baseaddr);
+
+ if (synced) {
+ tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
+ baseaddr,
+ length,
+ idxmap,
+ bits);
+ } else {
+ tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
+ length, idxmap, bits);
+ }
+}
+
+static void tlbi_aa64_rvae1_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL1&0.
+ * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, vae1_tlbmask(env),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae1is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable EL1&0.
+ * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
+ * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
+ * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
+ * shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, vae1_tlbmask(env), true);
+}
+
+static int vae2_tlbmask(CPUARMState *env)
+{
+ return (arm_is_secure_below_el3(env)
+ ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+}
+
+static void tlbi_aa64_rvae2_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL2.
+ * Currently handles all of RVAE2 and RVALE2,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, vae2_tlbmask(env),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae2is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable, EL2.
+ * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, vae2_tlbmask(env), true);
+}
+
+static void tlbi_aa64_rvae3_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3.
+ * Currently handles all of RVAE3 and RVALE3,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, ARMMMUIdxBit_SE3,
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae3is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3, Inner/Outer Shareable.
+ * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
+ * since we don't support flush-for-specific-ASID-only,
+     * flush-last-level-only or inner/outer shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+}
+#endif
+
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -6920,6 +7086,158 @@ static const ARMCPRegInfo pauth_reginfo[] = {
REGINFO_SENTINEL
};
+static const ARMCPRegInfo tlbirange_reginfo[] = {
+ { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3_write },
+ { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo tlbios_reginfo[] = {
+ { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ REGINFO_SENTINEL
+};
+
static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
Error *err = NULL;
@@ -7561,8 +7879,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- /* At present, only SVEver == 0 is defined anyway. */
- .resetvalue = 0 },
+ .resetvalue = cpu->isar.id_aa64zfr0 },
{ .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8289,6 +8606,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_rndr, cpu)) {
define_arm_cp_regs(cpu, rndr_reginfo);
}
+ if (cpu_isar_feature(aa64_tlbirange, cpu)) {
+ define_arm_cp_regs(cpu, tlbirange_reginfo);
+ }
+ if (cpu_isar_feature(aa64_tlbios, cpu)) {
+ define_arm_cp_regs(cpu, tlbios_reginfo);
+ }
#ifndef CONFIG_USER_ONLY
/* Data Cache clean instructions up to PoP */
if (cpu_isar_feature(aa64_dcpop, cpu)) {
diff --git a/target/arm/helper.h b/target/arm/helper.h
index ff8148ddc6..23ccb0f72f 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -591,34 +591,60 @@ DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcmlah, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlas, TCG_CALL_NO_RWG,
+
+DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(neon_paddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(neon_pmaxh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(neon_pminh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
@@ -828,6 +854,16 @@ DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -926,6 +962,44 @@ DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
#ifdef TARGET_AARCH64
#include "helper-a64.h"
#include "helper-sve.h"
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index dff85f6db9..37ceadd9a9 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -647,17 +647,26 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
- kvm_arm_destroy_scratch_host_vcpu(fdarray);
-
- if (err < 0) {
- return false;
- }
-
/* Add feature bits that can't appear until after VCPU init. */
if (sve_supported) {
t = ahcf->isar.id_aa64pfr0;
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
ahcf->isar.id_aa64pfr0 = t;
+
+ /*
+ * Before v5.1, KVM did not support SVE and did not expose
+ * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does
+ * not expose the register to "user" requests like this
+ * unless the host supports SVE.
+ */
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 4));
+ }
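
(For reference: ARM64_SYS_REG(3, 0, 0, 4, 4) is the KVM encoding of op0=3, op1=0, CRn=0, CRm=4, op2=4, i.e. ID_AA64ZFR0_EL1, matching the cpreg whose resetvalue is wired to cpu->isar.id_aa64zfr0 in the helper.c hunk above.)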
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ if (err < 0) {
+ return false;
}
/*
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index d63ae465e1..eda74e5545 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -1597,10 +1597,11 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
* We use this limited C variable scope so we don't accidentally
* use 'frame_sp_p' after we do something that makes it invalid.
*/
+ bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
return_to_secure,
!return_to_handler,
- return_to_sp_process);
+ spsel);
uint32_t frameptr = *frame_sp_p;
bool pop_ok = true;
ARMMMUIdx mmu_idx;
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
index ca0c699072..cc9f4cdd85 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/neon-shared.decode
@@ -46,8 +46,11 @@ VCMLA 1111 110 rot:2 . 1 . .... .... 1000 . q:1 . 0 .... \
VCADD 1111 110 rot:1 1 . 0 . .... .... 1000 . q:1 . 0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp size=%vcadd_size
-# VUDOT and VSDOT
-VDOT 1111 110 00 . 10 .... .... 1101 . q:1 . u:1 .... \
+VSDOT 1111 110 00 . 10 .... .... 1101 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUDOT 1111 110 00 . 10 .... .... 1101 . q:1 . 1 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUSDOT 1111 110 01 . 10 .... .... 1101 . q:1 . 0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp
# VFM[AS]L
@@ -56,13 +59,26 @@ VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
+VSMMLA 1111 1100 0.10 .... .... 1100 .1.0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUMMLA 1111 1100 0.10 .... .... 1100 .1.1 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
vn=%vn_dp vd=%vd_dp size=1
VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp size=2 index=0
-VDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 u:1 rm:4 \
- vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VSDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 0 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VUDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 1 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VUSDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VSUDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 1 vm:4 \
+ vn=%vn_dp vd=%vd_dp
%vfml_scalar_q0_rm 0:3 5:1
%vfml_scalar_q1_index 5:1 3:1
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
index b637265691..338b9189d5 100644
--- a/target/arm/neon_helper.c
+++ b/target/arm/neon_helper.c
@@ -11,6 +11,7 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
+#include "vec_internal.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
@@ -576,496 +577,154 @@ NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8 || \
- tmp <= -(ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp < 0) { \
- dest = src1 >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
NEON_VOP(shl_u16, neon_u16, 2)
#undef NEON_FN
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
- dest = src1 >> (sizeof(src1) * 8 - 1); \
- } else if (tmp < 0) { \
- dest = src1 >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
NEON_VOP(shl_s16, neon_s16, 2)
#undef NEON_FN
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if ((tmp >= (ssize_t)sizeof(src1) * 8) \
- || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
- dest = 0; \
- } else if (tmp < 0) { \
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
NEON_VOP(rshl_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN
-/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bit accumulator. */
-uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift)
{
- int32_t dest;
- int32_t val = (int32_t)valop;
- int8_t shift = (int8_t)shiftop;
- if ((shift >= 32) || (shift <= -32)) {
- dest = 0;
- } else if (shift < 0) {
- int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
- dest = big_dest >> -shift;
- } else {
- dest = val << shift;
- }
- return dest;
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
}
-/* Handling addition overflow with 64 bit input values is more
- * tricky than with 32 bit values. */
-uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
-{
- int8_t shift = (int8_t)shiftop;
- int64_t val = valop;
- if ((shift >= 64) || (shift <= -64)) {
- val = 0;
- } else if (shift < 0) {
- val >>= (-shift - 1);
- if (val == INT64_MAX) {
- /* In this case, it means that the rounding constant is 1,
- * and the addition would overflow. Return the actual
- * result directly. */
- val = 0x4000000000000000LL;
- } else {
- val++;
- val >>= 1;
- }
- } else {
- val <<= shift;
- }
- return val;
+uint64_t HELPER(neon_rshl_s64)(uint64_t val, uint64_t shift)
+{
+ return do_sqrshl_d(val, (int8_t)shift, true, NULL);
}
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8 || \
- tmp < -(ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
- dest = src1 >> (-tmp - 1); \
- } else if (tmp < 0) { \
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
NEON_VOP(rshl_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN
-/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bit accumulator. */
-uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
+uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift)
{
- uint32_t dest;
- int8_t shift = (int8_t)shiftop;
- if (shift >= 32 || shift < -32) {
- dest = 0;
- } else if (shift == -32) {
- dest = val >> 31;
- } else if (shift < 0) {
- uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
- dest = big_dest >> -shift;
- } else {
- dest = val << shift;
- }
- return dest;
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
}
-/* Handling addition overflow with 64 bit input values is more
- * tricky than with 32 bit values. */
-uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
-{
- int8_t shift = (uint8_t)shiftop;
- if (shift >= 64 || shift < -64) {
- val = 0;
- } else if (shift == -64) {
- /* Rounding a 1-bit result just preserves that bit. */
- val >>= 63;
- } else if (shift < 0) {
- val >>= (-shift - 1);
- if (val == UINT64_MAX) {
- /* In this case, it means that the rounding constant is 1,
- * and the addition would overflow. Return the actual
- * result directly. */
- val = 0x8000000000000000ULL;
- } else {
- val++;
- val >>= 1;
- }
- } else {
- val <<= shift;
- }
- return val;
+uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift)
+{
+ return do_uqrshl_d(val, (int8_t)shift, true, NULL);
}
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
- if (src1) { \
- SET_QC(); \
- dest = ~0; \
- } else { \
- dest = 0; \
- } \
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp < 0) { \
- dest = src1 >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- if ((dest >> tmp) != src1) { \
- SET_QC(); \
- dest = ~0; \
- } \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
-NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN
-uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
-{
- int8_t shift = (int8_t)shiftop;
- if (shift >= 64) {
- if (val) {
- val = ~(uint64_t)0;
- SET_QC();
- }
- } else if (shift <= -64) {
- val = 0;
- } else if (shift < 0) {
- val >>= -shift;
- } else {
- uint64_t tmp = val;
- val <<= shift;
- if ((val >> shift) != tmp) {
- SET_QC();
- val = ~(uint64_t)0;
- }
- }
- return val;
+uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
}
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
- if (src1) { \
- SET_QC(); \
- dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
- if (src1 > 0) { \
- dest--; \
- } \
- } else { \
- dest = src1; \
- } \
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
- dest = src1 >> 31; \
- } else if (tmp < 0) { \
- dest = src1 >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- if ((dest >> tmp) != src1) { \
- SET_QC(); \
- dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
- if (src1 > 0) { \
- dest--; \
- } \
- } \
- }} while (0)
+uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_uqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
-NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN
-uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
-{
- int8_t shift = (uint8_t)shiftop;
- int64_t val = valop;
- if (shift >= 64) {
- if (val) {
- SET_QC();
- val = (val >> 63) ^ ~SIGNBIT64;
- }
- } else if (shift <= -64) {
- val >>= 63;
- } else if (shift < 0) {
- val >>= -shift;
- } else {
- int64_t tmp = val;
- val <<= shift;
- if ((val >> shift) != tmp) {
- SET_QC();
- val = (tmp >> 63) ^ ~SIGNBIT64;
- }
- }
- return val;
+uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
}
-#define NEON_FN(dest, src1, src2) do { \
- if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
- SET_QC(); \
- dest = 0; \
- } else { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
- if (src1) { \
- SET_QC(); \
- dest = ~0; \
- } else { \
- dest = 0; \
- } \
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp < 0) { \
- dest = src1 >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- if ((dest >> tmp) != src1) { \
- SET_QC(); \
- dest = ~0; \
- } \
- } \
- }} while (0)
-NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
-NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
+uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_sqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
+NEON_VOP_ENV(qshlu_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
+NEON_VOP_ENV(qshlu_s16, neon_s16, 2)
#undef NEON_FN
-uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
- if ((int32_t)valop < 0) {
- SET_QC();
- return 0;
- }
- return helper_neon_qshl_u32(env, valop, shiftop);
+ return do_suqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
}
-uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
+uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
{
- if ((int64_t)valop < 0) {
- SET_QC();
- return 0;
- }
- return helper_neon_qshl_u64(env, valop, shiftop);
+ return do_suqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
}
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
- if (src1) { \
- SET_QC(); \
- dest = ~0; \
- } else { \
- dest = 0; \
- } \
- } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
- dest = src1 >> (sizeof(src1) * 8 - 1); \
- } else if (tmp < 0) { \
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- if ((dest >> tmp) != src1) { \
- SET_QC(); \
- dest = ~0; \
- } \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN
-/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bit accumulator. */
-uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
+uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
- uint32_t dest;
- int8_t shift = (int8_t)shiftop;
- if (shift >= 32) {
- if (val) {
- SET_QC();
- dest = ~0;
- } else {
- dest = 0;
- }
- } else if (shift < -32) {
- dest = 0;
- } else if (shift == -32) {
- dest = val >> 31;
- } else if (shift < 0) {
- uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
- dest = big_dest >> -shift;
- } else {
- dest = val << shift;
- if ((dest >> shift) != val) {
- SET_QC();
- dest = ~0;
- }
- }
- return dest;
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
}
-/* Handling addition overflow with 64 bit input values is more
- * tricky than with 32 bit values. */
-uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
-{
- int8_t shift = (int8_t)shiftop;
- if (shift >= 64) {
- if (val) {
- SET_QC();
- val = ~0;
- }
- } else if (shift < -64) {
- val = 0;
- } else if (shift == -64) {
- val >>= 63;
- } else if (shift < 0) {
- val >>= (-shift - 1);
- if (val == UINT64_MAX) {
- /* In this case, it means that the rounding constant is 1,
- * and the addition would overflow. Return the actual
- * result directly. */
- val = 0x8000000000000000ULL;
- } else {
- val++;
- val >>= 1;
- }
- } else { \
- uint64_t tmp = val;
- val <<= shift;
- if ((val >> shift) != tmp) {
- SET_QC();
- val = ~0;
- }
- }
- return val;
+uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_uqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
}
-#define NEON_FN(dest, src1, src2) do { \
- int8_t tmp; \
- tmp = (int8_t)src2; \
- if (tmp >= (ssize_t)sizeof(src1) * 8) { \
- if (src1) { \
- SET_QC(); \
- dest = (typeof(dest))(1 << (sizeof(src1) * 8 - 1)); \
- if (src1 > 0) { \
- dest--; \
- } \
- } else { \
- dest = 0; \
- } \
- } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
- dest = 0; \
- } else if (tmp < 0) { \
- dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
- } else { \
- dest = src1 << tmp; \
- if ((dest >> tmp) != src1) { \
- SET_QC(); \
- dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
- if (src1 > 0) { \
- dest--; \
- } \
- } \
- }} while (0)
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN
-/* The addition of the rounding constant may overflow, so we use an
- * intermediate 64 bit accumulator. */
-uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
+uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
- int32_t dest;
- int32_t val = (int32_t)valop;
- int8_t shift = (int8_t)shiftop;
- if (shift >= 32) {
- if (val) {
- SET_QC();
- dest = (val >> 31) ^ ~SIGNBIT;
- } else {
- dest = 0;
- }
- } else if (shift <= -32) {
- dest = 0;
- } else if (shift < 0) {
- int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
- dest = big_dest >> -shift;
- } else {
- dest = val << shift;
- if ((dest >> shift) != val) {
- SET_QC();
- dest = (val >> 31) ^ ~SIGNBIT;
- }
- }
- return dest;
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
}
-/* Handling addition overflow with 64 bit input values is more
- * tricky than with 32 bit values. */
-uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
-{
- int8_t shift = (uint8_t)shiftop;
- int64_t val = valop;
-
- if (shift >= 64) {
- if (val) {
- SET_QC();
- val = (val >> 63) ^ ~SIGNBIT64;
- }
- } else if (shift <= -64) {
- val = 0;
- } else if (shift < 0) {
- val >>= (-shift - 1);
- if (val == INT64_MAX) {
- /* In this case, it means that the rounding constant is 1,
- * and the addition would overflow. Return the actual
- * result directly. */
- val = 0x4000000000000000ULL;
- } else {
- val++;
- val >>= 1;
- }
- } else {
- int64_t tmp = val;
- val <<= shift;
- if ((val >> shift) != tmp) {
- SET_QC();
- val = (tmp >> 63) ^ ~SIGNBIT64;
- }
- }
- return val;
+uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_sqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
}
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
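
All six of the removed qrshl helpers repeated the subtlety the deleted comments describe: adding the rounding constant 1 << (-shift - 1) can carry out of the operand width, so a wider accumulator (or a careful two-step shift) is required. A minimal standalone sketch of the unsigned 32-bit case, illustrative only; the shared logic now lives in do_uqrshl_bhs/do_uqrshl_d in vec_internal.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Rounded, saturating shift of a 32-bit value by a signed count:
     * negative counts shift right with rounding, positive counts shift
     * left with saturation.  The 64-bit accumulator absorbs the carry
     * that the rounding constant can produce.  Not the QEMU code.
     */
    static uint32_t uqrshl32(uint32_t val, int8_t shift, bool *sat)
    {
        if (shift >= 32) {
            if (val) {
                *sat = true;
                return UINT32_MAX;
            }
            return 0;
        } else if (shift < -32) {
            return 0;
        } else if (shift < 0) {
            uint64_t big = (uint64_t)val + (1ull << (-1 - shift));
            return big >> -shift;
        } else {
            uint32_t dest = val << shift;
            if ((dest >> shift) != val) {
                *sat = true;
                return UINT32_MAX;
            }
            return dest;
        }
    }

    int main(void)
    {
        bool sat = false;
        /* The rounding add carries into bit 32: 0xffffffff >r> 1. */
        printf("%#x\n", uqrshl32(UINT32_MAX, -1, &sat));  /* 0x80000000 */
        printf("%#x\n", uqrshl32(0x40000000, 2, &sat));   /* 0xffffffff */
        printf("sat=%d\n", sat);                          /* sat=1 */
        return 0;
    }
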
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 5c90603358..cb077bfde9 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -30,6 +30,8 @@
%size_23 23:2
%dtype_23_13 23:2 13:2
%index3_22_19 22:1 19:2
+%index3_19_11 19:2 11:1
+%index2_20_11 20:1 11:1
# A combination of tsz:imm3 -- extract esize.
%tszimm_esz 22:2 5:5 !function=tszimm_esz
@@ -65,11 +67,15 @@
&rr_dbm rd rn dbm
&rrri rd rn rm imm
&rri_esz rd rn imm esz
+&rrri_esz rd rn rm imm esz
&rrr_esz rd rn rm esz
+&rrx_esz rd rn rm index esz
&rpr_esz rd pg rn esz
&rpr_s rd pg rn s
&rprr_s rd pg rn rm s
&rprr_esz rd pg rn rm esz
+&rrrr_esz rd ra rn rm esz
+&rrxr_esz rd rn rm ra index esz
&rprrr_esz rd pg rn rm ra esz
&rpri_esz rd pg rn imm esz
&ptrue rd esz pat s
@@ -112,6 +118,8 @@
@pd_pn_pm ........ esz:2 .. rm:4 ....... rn:4 . rd:4 &rrr_esz
@rdn_rm ........ esz:2 ...... ...... rm:5 rd:5 \
&rrr_esz rn=%reg_movprfx
+@rdn_rm_e0 ........ .. ...... ...... rm:5 rd:5 \
+ &rrr_esz rn=%reg_movprfx esz=0
@rdn_sh_i8u ........ esz:2 ...... ...... ..... rd:5 \
&rri_esz rn=%reg_movprfx imm=%sh8_i8u
@rdn_i8u ........ esz:2 ...... ... imm:8 rd:5 \
@@ -119,6 +127,16 @@
@rdn_i8s ........ esz:2 ...... ... imm:s8 rd:5 \
&rri_esz rn=%reg_movprfx
+# Four operand, vector element size
+@rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \
+ &rrrr_esz ra=%reg_movprfx
+
+# Four operand with unused vector element size
+@rda_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 \
+ &rrrr_esz esz=0 ra=%reg_movprfx
+@rdn_ra_rm_e0 ........ ... rm:5 ... ... ra:5 rd:5 \
+ &rrrr_esz esz=0 rn=%reg_movprfx
+
# Three operand with "memory" size, aka immediate left shift
@rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri
@@ -137,6 +155,7 @@
&rprrr_esz rn=%reg_movprfx
@rdn_pg_rm_ra ........ esz:2 . ra:5 ... pg:3 rm:5 rd:5 \
&rprrr_esz rn=%reg_movprfx
+@rd_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 &rprr_esz
# One register operand, with governing predicate, vector element size
@rd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz
@@ -234,6 +253,32 @@
@rpri_scatter_store ....... msz:2 .. imm:5 ... pg:3 rn:5 rd:5 \
&rpri_scatter_store
+# Two registers and a scalar by N-bit index
+@rrx_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \
+ &rrx_esz index=%index3_22_19
+@rrx_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 &rrx_esz
+@rrx_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 &rrx_esz
+
+# Two registers and a scalar by N-bit index, alternate
+@rrx_3a ........ .. . .. rm:3 ...... rn:5 rd:5 \
+ &rrx_esz index=%index3_19_11
+@rrx_2a ........ .. . . rm:4 ...... rn:5 rd:5 \
+ &rrx_esz index=%index2_20_11
+
+# Three registers and a scalar by N-bit index
+@rrxr_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx index=%index3_22_19
+@rrxr_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx
+@rrxr_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx
+
+# Three registers and a scalar by N-bit index, alternate
+@rrxr_3a ........ .. ... rm:3 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx index=%index3_19_11
+@rrxr_2a ........ .. .. rm:4 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx index=%index2_20_11
+
###########################################################################
# Instruction patterns. Grouped according to the SVE encodingindex.xhtml.
@@ -297,6 +342,11 @@ ASR_zpzi 00000100 .. 000 000 100 ... .. ... ..... @rdn_pg_tszimm_shr
LSR_zpzi 00000100 .. 000 001 100 ... .. ... ..... @rdn_pg_tszimm_shr
LSL_zpzi 00000100 .. 000 011 100 ... .. ... ..... @rdn_pg_tszimm_shl
ASRD 00000100 .. 000 100 100 ... .. ... ..... @rdn_pg_tszimm_shr
+SQSHL_zpzi 00000100 .. 000 110 100 ... .. ... ..... @rdn_pg_tszimm_shl
+UQSHL_zpzi 00000100 .. 000 111 100 ... .. ... ..... @rdn_pg_tszimm_shl
+SRSHR 00000100 .. 001 100 100 ... .. ... ..... @rdn_pg_tszimm_shr
+URSHR 00000100 .. 001 101 100 ... .. ... ..... @rdn_pg_tszimm_shr
+SQSHLU 00000100 .. 001 111 100 ... .. ... ..... @rdn_pg_tszimm_shl
# SVE bitwise shift by vector (predicated)
ASR_zpzz 00000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm
@@ -374,6 +424,17 @@ ORR_zzz 00000100 01 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
EOR_zzz 00000100 10 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
BIC_zzz 00000100 11 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
+XAR 00000100 .. 1 ..... 001 101 rm:5 rd:5 &rrri_esz \
+ rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr
+
+# SVE2 bitwise ternary operations
+EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
+BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
+BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+
### SVE Index Generation Group
# SVE index generation (immediate start, immediate increment)
@@ -472,10 +533,14 @@ CPY_z_i 00000101 .. 01 .... 00 . ........ ..... @rdn_pg4 imm=%sh8_i8s
### SVE Permute - Extract Group
-# SVE extract vector (immediate offset)
+# SVE extract vector (destructive)
EXT 00000101 001 ..... 000 ... rm:5 rd:5 \
&rrri rn=%reg_movprfx imm=%imm8_16_10
+# SVE2 extract vector (constructive)
+EXT_sve2 00000101 011 ..... 000 ... rn:5 rd:5 \
+ &rri imm=%imm8_16_10
+
### SVE Permute - Unpredicated Group
# SVE broadcast general register
@@ -500,6 +565,11 @@ TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm
# SVE unpack vector elements
UNPK 00000101 esz:2 1100 u:1 h:1 001110 rn:5 rd:5
+# SVE2 Table Lookup (three sources)
+
+TBL_sve2 00000101 .. 1 ..... 001010 ..... ..... @rd_rn_rm
+TBX 00000101 .. 1 ..... 001011 ..... ..... @rd_rn_rm
+
### SVE Permute - Predicates Group
# SVE permute predicate elements
@@ -527,6 +597,14 @@ UZP2_z 00000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
TRN1_z 00000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
TRN2_z 00000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
+# SVE2 permute vector segments
+ZIP1_q 00000101 10 1 ..... 000 000 ..... ..... @rd_rn_rm_e0
+ZIP2_q 00000101 10 1 ..... 000 001 ..... ..... @rd_rn_rm_e0
+UZP1_q 00000101 10 1 ..... 000 010 ..... ..... @rd_rn_rm_e0
+UZP2_q 00000101 10 1 ..... 000 011 ..... ..... @rd_rn_rm_e0
+TRN1_q 00000101 10 1 ..... 000 110 ..... ..... @rd_rn_rm_e0
+TRN2_q 00000101 10 1 ..... 000 111 ..... ..... @rd_rn_rm_e0
+
### SVE Permute - Predicated Group
# SVE compress active elements
@@ -566,9 +644,12 @@ REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
-# SVE vector splice (predicated)
+# SVE vector splice (predicated, destructive)
SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
+# SVE2 vector splice (predicated, constructive)
+SPLICE_sve2 00000101 .. 101 101 100 ... ..... ..... @rd_pg_rn
+
### SVE Select Vectors Group
# SVE select vector elements (predicated)
@@ -695,7 +776,10 @@ SINCDECP_z 00100101 .. 1010 d:1 u:1 10000 00 .... ..... @incdec2_pred
CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000
# SVE integer compare scalar count and limit
-WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 1 rn:5 eq:1 rd:4
+WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 lt:1 rn:5 eq:1 rd:4
+
+# SVE2 pointer conflict compare
+WHILE_ptr 00100101 esz:2 1 rm:5 001 100 rn:5 rw:1 rd:4
### SVE Integer Wide Immediate - Unpredicated Group
@@ -724,13 +808,114 @@ UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u
MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s
# SVE integer dot product (unpredicated)
-DOT_zzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 ra=%reg_movprfx
+DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 complex dot product (vectors)
+CDOT_zzzz 01000100 esz:2 0 rm:5 0001 rot:2 rn:5 rd:5 ra=%reg_movprfx
+
+#### SVE Multiply - Indexed
# SVE integer dot product (indexed)
-DOT_zzx 01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
- sz=0 ra=%reg_movprfx
-DOT_zzx 01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
- sz=1 ra=%reg_movprfx
+SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2
+SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3
+UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2
+UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3
+
+# SVE2 integer multiply-add (indexed)
+MLA_zzxz_h 01000100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=1
+MLA_zzxz_s 01000100 10 1 ..... 000010 ..... ..... @rrxr_2 esz=2
+MLA_zzxz_d 01000100 11 1 ..... 000010 ..... ..... @rrxr_1 esz=3
+MLS_zzxz_h 01000100 0. 1 ..... 000011 ..... ..... @rrxr_3 esz=1
+MLS_zzxz_s 01000100 10 1 ..... 000011 ..... ..... @rrxr_2 esz=2
+MLS_zzxz_d 01000100 11 1 ..... 000011 ..... ..... @rrxr_1 esz=3
+
+# SVE2 saturating multiply-add high (indexed)
+SQRDMLAH_zzxz_h 01000100 0. 1 ..... 000100 ..... ..... @rrxr_3 esz=1
+SQRDMLAH_zzxz_s 01000100 10 1 ..... 000100 ..... ..... @rrxr_2 esz=2
+SQRDMLAH_zzxz_d 01000100 11 1 ..... 000100 ..... ..... @rrxr_1 esz=3
+SQRDMLSH_zzxz_h 01000100 0. 1 ..... 000101 ..... ..... @rrxr_3 esz=1
+SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2
+SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3
+
+# SVE mixed sign dot product (indexed)
+USDOT_zzxw_s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2
+SUDOT_zzxw_s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2
+
+# SVE2 saturating multiply-add (indexed)
+SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2
+SQDMLALB_zzxw_d 01000100 11 1 ..... 0010.0 ..... ..... @rrxr_2a esz=3
+SQDMLALT_zzxw_s 01000100 10 1 ..... 0010.1 ..... ..... @rrxr_3a esz=2
+SQDMLALT_zzxw_d 01000100 11 1 ..... 0010.1 ..... ..... @rrxr_2a esz=3
+SQDMLSLB_zzxw_s 01000100 10 1 ..... 0011.0 ..... ..... @rrxr_3a esz=2
+SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3
+SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2
+SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3
+
+# SVE2 complex integer dot product (indexed)
+CDOT_zzxw_s 01000100 10 1 index:2 rm:3 0100 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+CDOT_zzxw_d 01000100 11 1 index:1 rm:4 0100 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 complex integer multiply-add (indexed)
+CMLA_zzxz_h 01000100 10 1 index:2 rm:3 0110 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+CMLA_zzxz_s 01000100 11 1 index:1 rm:4 0110 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 complex saturating integer multiply-add (indexed)
+SQRDCMLAH_zzxz_h 01000100 10 1 index:2 rm:3 0111 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+SQRDCMLAH_zzxz_s 01000100 11 1 index:1 rm:4 0111 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 multiply-add long (indexed)
+SMLALB_zzxw_s 01000100 10 1 ..... 1000.0 ..... ..... @rrxr_3a esz=2
+SMLALB_zzxw_d 01000100 11 1 ..... 1000.0 ..... ..... @rrxr_2a esz=3
+SMLALT_zzxw_s 01000100 10 1 ..... 1000.1 ..... ..... @rrxr_3a esz=2
+SMLALT_zzxw_d 01000100 11 1 ..... 1000.1 ..... ..... @rrxr_2a esz=3
+UMLALB_zzxw_s 01000100 10 1 ..... 1001.0 ..... ..... @rrxr_3a esz=2
+UMLALB_zzxw_d 01000100 11 1 ..... 1001.0 ..... ..... @rrxr_2a esz=3
+UMLALT_zzxw_s 01000100 10 1 ..... 1001.1 ..... ..... @rrxr_3a esz=2
+UMLALT_zzxw_d 01000100 11 1 ..... 1001.1 ..... ..... @rrxr_2a esz=3
+SMLSLB_zzxw_s 01000100 10 1 ..... 1010.0 ..... ..... @rrxr_3a esz=2
+SMLSLB_zzxw_d 01000100 11 1 ..... 1010.0 ..... ..... @rrxr_2a esz=3
+SMLSLT_zzxw_s 01000100 10 1 ..... 1010.1 ..... ..... @rrxr_3a esz=2
+SMLSLT_zzxw_d 01000100 11 1 ..... 1010.1 ..... ..... @rrxr_2a esz=3
+UMLSLB_zzxw_s 01000100 10 1 ..... 1011.0 ..... ..... @rrxr_3a esz=2
+UMLSLB_zzxw_d 01000100 11 1 ..... 1011.0 ..... ..... @rrxr_2a esz=3
+UMLSLT_zzxw_s 01000100 10 1 ..... 1011.1 ..... ..... @rrxr_3a esz=2
+UMLSLT_zzxw_d 01000100 11 1 ..... 1011.1 ..... ..... @rrxr_2a esz=3
+
+# SVE2 integer multiply long (indexed)
+SMULLB_zzx_s 01000100 10 1 ..... 1100.0 ..... ..... @rrx_3a esz=2
+SMULLB_zzx_d 01000100 11 1 ..... 1100.0 ..... ..... @rrx_2a esz=3
+SMULLT_zzx_s 01000100 10 1 ..... 1100.1 ..... ..... @rrx_3a esz=2
+SMULLT_zzx_d 01000100 11 1 ..... 1100.1 ..... ..... @rrx_2a esz=3
+UMULLB_zzx_s 01000100 10 1 ..... 1101.0 ..... ..... @rrx_3a esz=2
+UMULLB_zzx_d 01000100 11 1 ..... 1101.0 ..... ..... @rrx_2a esz=3
+UMULLT_zzx_s 01000100 10 1 ..... 1101.1 ..... ..... @rrx_3a esz=2
+UMULLT_zzx_d 01000100 11 1 ..... 1101.1 ..... ..... @rrx_2a esz=3
+
+# SVE2 saturating multiply (indexed)
+SQDMULLB_zzx_s 01000100 10 1 ..... 1110.0 ..... ..... @rrx_3a esz=2
+SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3
+SQDMULLT_zzx_s 01000100 10 1 ..... 1110.1 ..... ..... @rrx_3a esz=2
+SQDMULLT_zzx_d 01000100 11 1 ..... 1110.1 ..... ..... @rrx_2a esz=3
+
+# SVE2 saturating multiply high (indexed)
+SQDMULH_zzx_h 01000100 0. 1 ..... 111100 ..... ..... @rrx_3 esz=1
+SQDMULH_zzx_s 01000100 10 1 ..... 111100 ..... ..... @rrx_2 esz=2
+SQDMULH_zzx_d 01000100 11 1 ..... 111100 ..... ..... @rrx_1 esz=3
+SQRDMULH_zzx_h 01000100 0. 1 ..... 111101 ..... ..... @rrx_3 esz=1
+SQRDMULH_zzx_s 01000100 10 1 ..... 111101 ..... ..... @rrx_2 esz=2
+SQRDMULH_zzx_d 01000100 11 1 ..... 111101 ..... ..... @rrx_1 esz=3
+
+# SVE2 integer multiply (indexed)
+MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1
+MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2
+MUL_zzx_d 01000100 11 1 ..... 111110 ..... ..... @rrx_1 esz=3
# SVE floating-point complex add (predicated)
FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
@@ -749,20 +934,19 @@ FCMLA_zzxz 01100100 11 1 index:1 rm:4 0001 rot:2 rn:5 rd:5 \
### SVE FP Multiply-Add Indexed Group
# SVE floating-point multiply-add (indexed)
-FMLA_zzxz 01100100 0.1 .. rm:3 00000 sub:1 rn:5 rd:5 \
- ra=%reg_movprfx index=%index3_22_19 esz=1
-FMLA_zzxz 01100100 101 index:2 rm:3 00000 sub:1 rn:5 rd:5 \
- ra=%reg_movprfx esz=2
-FMLA_zzxz 01100100 111 index:1 rm:4 00000 sub:1 rn:5 rd:5 \
- ra=%reg_movprfx esz=3
+FMLA_zzxz 01100100 0. 1 ..... 000000 ..... ..... @rrxr_3 esz=1
+FMLA_zzxz 01100100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2
+FMLA_zzxz 01100100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3
+FMLS_zzxz 01100100 0. 1 ..... 000001 ..... ..... @rrxr_3 esz=1
+FMLS_zzxz 01100100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2
+FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3
### SVE FP Multiply Indexed Group
# SVE floating-point multiply (indexed)
-FMUL_zzx 01100100 0.1 .. rm:3 001000 rn:5 rd:5 \
- index=%index3_22_19 esz=1
-FMUL_zzx 01100100 101 index:2 rm:3 001000 rn:5 rd:5 esz=2
-FMUL_zzx 01100100 111 index:1 rm:4 001000 rn:5 rd:5 esz=3
+FMUL_zzx 01100100 0. 1 ..... 001000 ..... ..... @rrx_3 esz=1
+FMUL_zzx 01100100 10 1 ..... 001000 ..... ..... @rrx_2 esz=2
+FMUL_zzx 01100100 11 1 ..... 001000 ..... ..... @rrx_1 esz=3
### SVE FP Fast Reduction Group
@@ -957,11 +1141,15 @@ LD_zpri 1010010 .. nreg:2 0.... 111 ... ..... ..... @rpri_load_msz
# SVE load and broadcast quadword (scalar plus scalar)
LD1RQ_zprr 1010010 .. 00 ..... 000 ... ..... ..... \
@rprr_load_msz nreg=0
+LD1RO_zprr 1010010 .. 01 ..... 000 ... ..... ..... \
+ @rprr_load_msz nreg=0
# SVE load and broadcast quadword (scalar plus immediate)
# LD1RQB, LD1RQH, LD1RQS, LD1RQD
LD1RQ_zpri 1010010 .. 00 0.... 001 ... ..... ..... \
@rpri_load_msz nreg=0
+LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \
+ @rpri_load_msz nreg=0
# SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
PRF 1000010 00 -1 ----- 0-- --- ----- 0 ----
@@ -1090,3 +1278,353 @@ ST1_zprz 1110010 .. 00 ..... 100 ... ..... ..... \
@rprr_scatter_store xs=0 esz=3 scale=0
ST1_zprz 1110010 .. 00 ..... 110 ... ..... ..... \
@rprr_scatter_store xs=1 esz=3 scale=0
+
+#### SVE2 Support
+
+### SVE2 Integer Multiply - Unpredicated
+
+# SVE2 integer multiply vectors (unpredicated)
+MUL_zzz 00000100 .. 1 ..... 0110 00 ..... ..... @rd_rn_rm
+SMULH_zzz 00000100 .. 1 ..... 0110 10 ..... ..... @rd_rn_rm
+UMULH_zzz 00000100 .. 1 ..... 0110 11 ..... ..... @rd_rn_rm
+PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0
+
+# SVE2 signed saturating doubling multiply high (unpredicated)
+SQDMULH_zzz 00000100 .. 1 ..... 0111 00 ..... ..... @rd_rn_rm
+SQRDMULH_zzz 00000100 .. 1 ..... 0111 01 ..... ..... @rd_rn_rm
+
+### SVE2 Integer - Predicated
+
+SADALP_zpzz 01000100 .. 000 100 101 ... ..... ..... @rdm_pg_rn
+UADALP_zpzz 01000100 .. 000 101 101 ... ..... ..... @rdm_pg_rn
+
+### SVE2 integer unary operations (predicated)
+
+URECPE 01000100 .. 000 000 101 ... ..... ..... @rd_pg_rn
+URSQRTE 01000100 .. 000 001 101 ... ..... ..... @rd_pg_rn
+SQABS 01000100 .. 001 000 101 ... ..... ..... @rd_pg_rn
+SQNEG 01000100 .. 001 001 101 ... ..... ..... @rd_pg_rn
+
+### SVE2 saturating/rounding bitwise shift left (predicated)
+
+SRSHL 01000100 .. 000 010 100 ... ..... ..... @rdn_pg_rm
+URSHL 01000100 .. 000 011 100 ... ..... ..... @rdn_pg_rm
+SRSHL 01000100 .. 000 110 100 ... ..... ..... @rdm_pg_rn # SRSHLR
+URSHL 01000100 .. 000 111 100 ... ..... ..... @rdm_pg_rn # URSHLR
+
+SQSHL 01000100 .. 001 000 100 ... ..... ..... @rdn_pg_rm
+UQSHL 01000100 .. 001 001 100 ... ..... ..... @rdn_pg_rm
+SQSHL 01000100 .. 001 100 100 ... ..... ..... @rdm_pg_rn # SQSHLR
+UQSHL 01000100 .. 001 101 100 ... ..... ..... @rdm_pg_rn # UQSHLR
+
+SQRSHL 01000100 .. 001 010 100 ... ..... ..... @rdn_pg_rm
+UQRSHL 01000100 .. 001 011 100 ... ..... ..... @rdn_pg_rm
+SQRSHL 01000100 .. 001 110 100 ... ..... ..... @rdm_pg_rn # SQRSHLR
+UQRSHL 01000100 .. 001 111 100 ... ..... ..... @rdm_pg_rn # UQRSHLR
+
+### SVE2 integer halving add/subtract (predicated)
+
+SHADD 01000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm
+UHADD 01000100 .. 010 001 100 ... ..... ..... @rdn_pg_rm
+SHSUB 01000100 .. 010 010 100 ... ..... ..... @rdn_pg_rm
+UHSUB 01000100 .. 010 011 100 ... ..... ..... @rdn_pg_rm
+SRHADD 01000100 .. 010 100 100 ... ..... ..... @rdn_pg_rm
+URHADD 01000100 .. 010 101 100 ... ..... ..... @rdn_pg_rm
+SHSUB 01000100 .. 010 110 100 ... ..... ..... @rdm_pg_rn # SHSUBR
+UHSUB 01000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # UHSUBR
+
+### SVE2 integer pairwise arithmetic
+
+ADDP 01000100 .. 010 001 101 ... ..... ..... @rdn_pg_rm
+SMAXP 01000100 .. 010 100 101 ... ..... ..... @rdn_pg_rm
+UMAXP 01000100 .. 010 101 101 ... ..... ..... @rdn_pg_rm
+SMINP 01000100 .. 010 110 101 ... ..... ..... @rdn_pg_rm
+UMINP 01000100 .. 010 111 101 ... ..... ..... @rdn_pg_rm
+
+### SVE2 saturating add/subtract (predicated)
+
+SQADD_zpzz 01000100 .. 011 000 100 ... ..... ..... @rdn_pg_rm
+UQADD_zpzz 01000100 .. 011 001 100 ... ..... ..... @rdn_pg_rm
+SQSUB_zpzz 01000100 .. 011 010 100 ... ..... ..... @rdn_pg_rm
+UQSUB_zpzz 01000100 .. 011 011 100 ... ..... ..... @rdn_pg_rm
+SUQADD 01000100 .. 011 100 100 ... ..... ..... @rdn_pg_rm
+USQADD 01000100 .. 011 101 100 ... ..... ..... @rdn_pg_rm
+SQSUB_zpzz 01000100 .. 011 110 100 ... ..... ..... @rdm_pg_rn # SQSUBR
+UQSUB_zpzz 01000100 .. 011 111 100 ... ..... ..... @rdm_pg_rn # UQSUBR
+
+#### SVE2 Widening Integer Arithmetic
+
+## SVE2 integer add/subtract long
+
+SADDLB 01000101 .. 0 ..... 00 0000 ..... ..... @rd_rn_rm
+SADDLT 01000101 .. 0 ..... 00 0001 ..... ..... @rd_rn_rm
+UADDLB 01000101 .. 0 ..... 00 0010 ..... ..... @rd_rn_rm
+UADDLT 01000101 .. 0 ..... 00 0011 ..... ..... @rd_rn_rm
+
+SSUBLB 01000101 .. 0 ..... 00 0100 ..... ..... @rd_rn_rm
+SSUBLT 01000101 .. 0 ..... 00 0101 ..... ..... @rd_rn_rm
+USUBLB 01000101 .. 0 ..... 00 0110 ..... ..... @rd_rn_rm
+USUBLT 01000101 .. 0 ..... 00 0111 ..... ..... @rd_rn_rm
+
+SABDLB 01000101 .. 0 ..... 00 1100 ..... ..... @rd_rn_rm
+SABDLT 01000101 .. 0 ..... 00 1101 ..... ..... @rd_rn_rm
+UABDLB 01000101 .. 0 ..... 00 1110 ..... ..... @rd_rn_rm
+UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm
+
+## SVE2 integer add/subtract interleaved long
+
+SADDLBT 01000101 .. 0 ..... 1000 00 ..... ..... @rd_rn_rm
+SSUBLBT 01000101 .. 0 ..... 1000 10 ..... ..... @rd_rn_rm
+SSUBLTB 01000101 .. 0 ..... 1000 11 ..... ..... @rd_rn_rm
+
+## SVE2 integer add/subtract wide
+
+SADDWB 01000101 .. 0 ..... 010 000 ..... ..... @rd_rn_rm
+SADDWT 01000101 .. 0 ..... 010 001 ..... ..... @rd_rn_rm
+UADDWB 01000101 .. 0 ..... 010 010 ..... ..... @rd_rn_rm
+UADDWT 01000101 .. 0 ..... 010 011 ..... ..... @rd_rn_rm
+
+SSUBWB 01000101 .. 0 ..... 010 100 ..... ..... @rd_rn_rm
+SSUBWT 01000101 .. 0 ..... 010 101 ..... ..... @rd_rn_rm
+USUBWB 01000101 .. 0 ..... 010 110 ..... ..... @rd_rn_rm
+USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm
+
+## SVE2 integer multiply long
+
+SQDMULLB_zzz 01000101 .. 0 ..... 011 000 ..... ..... @rd_rn_rm
+SQDMULLT_zzz 01000101 .. 0 ..... 011 001 ..... ..... @rd_rn_rm
+PMULLB 01000101 .. 0 ..... 011 010 ..... ..... @rd_rn_rm
+PMULLT 01000101 .. 0 ..... 011 011 ..... ..... @rd_rn_rm
+SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm
+SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm
+UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm
+UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm
+
+## SVE2 bitwise shift left long
+
+# Note bit23 == 0 is handled by esz > 0 in do_sve2_shll_tb.
+SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl
+SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl
+USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl
+USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 bitwise exclusive-or interleaved
+
+EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm
+EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm
+
+## SVE integer matrix multiply accumulate
+
+SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+
+## SVE2 bitwise permute
+
+BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm
+BDEP 01000101 .. 0 ..... 1011 01 ..... ..... @rd_rn_rm
+BGRP 01000101 .. 0 ..... 1011 10 ..... ..... @rd_rn_rm
+
+#### SVE2 Accumulate
+
+## SVE2 complex integer add
+
+CADD_rot90 01000101 .. 00000 0 11011 0 ..... ..... @rdn_rm
+CADD_rot270 01000101 .. 00000 0 11011 1 ..... ..... @rdn_rm
+SQCADD_rot90 01000101 .. 00000 1 11011 0 ..... ..... @rdn_rm
+SQCADD_rot270 01000101 .. 00000 1 11011 1 ..... ..... @rdn_rm
+
+## SVE2 integer absolute difference and accumulate long
+
+SABALB 01000101 .. 0 ..... 1100 00 ..... ..... @rda_rn_rm
+SABALT 01000101 .. 0 ..... 1100 01 ..... ..... @rda_rn_rm
+UABALB 01000101 .. 0 ..... 1100 10 ..... ..... @rda_rn_rm
+UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm
+
+## SVE2 integer add/subtract long with carry
+
+# ADC and SBC decoded via size in helper dispatch.
+ADCLB 01000101 .. 0 ..... 11010 0 ..... ..... @rda_rn_rm
+ADCLT 01000101 .. 0 ..... 11010 1 ..... ..... @rda_rn_rm
+
+## SVE2 bitwise shift right and accumulate
+
+# TODO: Use @rda and %reg_movprfx here.
+SSRA 01000101 .. 0 ..... 1110 00 ..... ..... @rd_rn_tszimm_shr
+USRA 01000101 .. 0 ..... 1110 01 ..... ..... @rd_rn_tszimm_shr
+SRSRA 01000101 .. 0 ..... 1110 10 ..... ..... @rd_rn_tszimm_shr
+URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr
+
+## SVE2 bitwise shift and insert
+
+SRI 01000101 .. 0 ..... 11110 0 ..... ..... @rd_rn_tszimm_shr
+SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 integer absolute difference and accumulate
+
+# TODO: Use @rda and %reg_movprfx here.
+SABA 01000101 .. 0 ..... 11111 0 ..... ..... @rd_rn_rm
+UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm
+
+#### SVE2 Narrowing
+
+## SVE2 saturating extract narrow
+
+# Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0.
+SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl
+SQXTNT 01000101 .. 1 ..... 010 001 ..... ..... @rd_rn_tszimm_shl
+UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
+UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
+SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
+SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 bitwise shift right narrow
+
+# Bit 23 == 0 is handled by esz > 0 in the translator.
+SQSHRUNB 01000101 .. 1 ..... 00 0000 ..... ..... @rd_rn_tszimm_shr
+SQSHRUNT 01000101 .. 1 ..... 00 0001 ..... ..... @rd_rn_tszimm_shr
+SQRSHRUNB 01000101 .. 1 ..... 00 0010 ..... ..... @rd_rn_tszimm_shr
+SQRSHRUNT 01000101 .. 1 ..... 00 0011 ..... ..... @rd_rn_tszimm_shr
+SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
+SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
+RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
+RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
+SQSHRNB 01000101 .. 1 ..... 00 1000 ..... ..... @rd_rn_tszimm_shr
+SQSHRNT 01000101 .. 1 ..... 00 1001 ..... ..... @rd_rn_tszimm_shr
+SQRSHRNB 01000101 .. 1 ..... 00 1010 ..... ..... @rd_rn_tszimm_shr
+SQRSHRNT 01000101 .. 1 ..... 00 1011 ..... ..... @rd_rn_tszimm_shr
+UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr
+UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr
+
+## SVE2 integer add/subtract narrow high part
+
+ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm
+ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm
+RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm
+RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
+SUBHNB 01000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
+SUBHNT 01000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
+RSUBHNB 01000101 .. 1 ..... 011 110 ..... ..... @rd_rn_rm
+RSUBHNT 01000101 .. 1 ..... 011 111 ..... ..... @rd_rn_rm
+
+### SVE2 Character Match
+
+MATCH 01000101 .. 1 ..... 100 ... ..... 0 .... @pd_pg_rn_rm
+NMATCH 01000101 .. 1 ..... 100 ... ..... 1 .... @pd_pg_rn_rm
+
+### SVE2 Histogram Computation
+
+HISTCNT 01000101 .. 1 ..... 110 ... ..... ..... @rd_pg_rn_rm
+HISTSEG 01000101 .. 1 ..... 101 000 ..... ..... @rd_rn_rm
+
+## SVE2 floating-point pairwise operations
+
+FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm
+FMAXNMP 01100100 .. 010 10 0 100 ... ..... ..... @rdn_pg_rm
+FMINNMP 01100100 .. 010 10 1 100 ... ..... ..... @rdn_pg_rm
+FMAXP 01100100 .. 010 11 0 100 ... ..... ..... @rdn_pg_rm
+FMINP 01100100 .. 010 11 1 100 ... ..... ..... @rdn_pg_rm
+
+#### SVE Integer Multiply-Add (unpredicated)
+
+## SVE2 saturating multiply-add long
+
+SQDMLALB_zzzw 01000100 .. 0 ..... 0110 00 ..... ..... @rda_rn_rm
+SQDMLALT_zzzw 01000100 .. 0 ..... 0110 01 ..... ..... @rda_rn_rm
+SQDMLSLB_zzzw 01000100 .. 0 ..... 0110 10 ..... ..... @rda_rn_rm
+SQDMLSLT_zzzw 01000100 .. 0 ..... 0110 11 ..... ..... @rda_rn_rm
+
+## SVE2 saturating multiply-add interleaved long
+
+SQDMLALBT 01000100 .. 0 ..... 00001 0 ..... ..... @rda_rn_rm
+SQDMLSLBT 01000100 .. 0 ..... 00001 1 ..... ..... @rda_rn_rm
+
+## SVE2 saturating multiply-add high
+
+SQRDMLAH_zzzz 01000100 .. 0 ..... 01110 0 ..... ..... @rda_rn_rm
+SQRDMLSH_zzzz 01000100 .. 0 ..... 01110 1 ..... ..... @rda_rn_rm
+
+## SVE2 integer multiply-add long
+
+SMLALB_zzzw 01000100 .. 0 ..... 010 000 ..... ..... @rda_rn_rm
+SMLALT_zzzw 01000100 .. 0 ..... 010 001 ..... ..... @rda_rn_rm
+UMLALB_zzzw 01000100 .. 0 ..... 010 010 ..... ..... @rda_rn_rm
+UMLALT_zzzw 01000100 .. 0 ..... 010 011 ..... ..... @rda_rn_rm
+SMLSLB_zzzw 01000100 .. 0 ..... 010 100 ..... ..... @rda_rn_rm
+SMLSLT_zzzw 01000100 .. 0 ..... 010 101 ..... ..... @rda_rn_rm
+UMLSLB_zzzw 01000100 .. 0 ..... 010 110 ..... ..... @rda_rn_rm
+UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm
+
+## SVE2 complex integer multiply-add
+
+CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx
+SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx
+
+## SVE mixed sign dot product
+
+USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm
+
+### SVE2 floating point matrix multiply accumulate
+
+FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm
+
+### SVE2 Memory Gather Load Group
+
+# SVE2 64-bit gather non-temporal load
+# (scalar plus unpacked 32-bit unscaled offsets)
+LDNT1_zprz 1100010 msz:2 00 rm:5 1 u:1 0 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=0 esz=3 scale=0 ff=0
+
+# SVE2 32-bit gather non-temporal load (scalar plus 32-bit unscaled offsets)
+LDNT1_zprz 1000010 msz:2 00 rm:5 10 u:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=0 esz=2 scale=0 ff=0
+
+### SVE2 Memory Store Group
+
+# SVE2 64-bit scatter non-temporal store (vector plus scalar)
+STNT1_zprz 1110010 .. 00 ..... 001 ... ..... ..... \
+ @rprr_scatter_store xs=2 esz=3 scale=0
+
+# SVE2 32-bit scatter non-temporal store (vector plus scalar)
+STNT1_zprz 1110010 .. 10 ..... 001 ... ..... ..... \
+ @rprr_scatter_store xs=0 esz=2 scale=0
+
+### SVE2 Crypto Extensions
+
+# SVE2 crypto unary operations
+# AESMC and AESIMC
+AESMC 01000101 00 10000011100 decrypt:1 00000 rd:5
+
+# SVE2 crypto destructive binary operations
+AESE 01000101 00 10001 0 11100 0 ..... ..... @rdn_rm_e0
+AESD 01000101 00 10001 0 11100 1 ..... ..... @rdn_rm_e0
+SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0
+
+# SVE2 crypto constructive binary operations
+SM4EKEY 01000101 00 1 ..... 11110 0 ..... ..... @rd_rn_rm_e0
+RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0
+
+### SVE2 floating-point convert precision odd elements
+FCVTXNT_ds 01100100 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTX_ds 01100101 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTNT_sh 01100100 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
+FCVTLT_hs 01100100 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
+FCVTNT_ds 01100100 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
+
+### SVE2 floating-point convert to integer
+FLOGB 01100101 00 011 esz:2 0101 pg:3 rn:5 rd:5 &rpr_esz
+
+### SVE2 floating-point multiply-add long (vectors)
+FMLALB_zzzw 01100100 10 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
+FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_e0
+FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_e0
+
+### SVE2 floating-point multiply-add long (indexed)
+FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
+FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
+FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
+FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index c068dfa0d5..40af3024df 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -26,24 +26,9 @@
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
+#include "vec_internal.h"
-/* Note that vector data is stored in host-endian 64-bit chunks,
- so addressing units smaller than that needs a host-endian fixup. */
-#ifdef HOST_WORDS_BIGENDIAN
-#define H1(x) ((x) ^ 7)
-#define H1_2(x) ((x) ^ 6)
-#define H1_4(x) ((x) ^ 4)
-#define H2(x) ((x) ^ 3)
-#define H4(x) ((x) ^ 1)
-#else
-#define H1(x) (x)
-#define H1_2(x) (x)
-#define H1_4(x) (x)
-#define H2(x) (x)
-#define H4(x) (x)
-#endif
-
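
The removed H1/H2/H4 byte-index macros move to vec_internal.h so the NEON and SVE helpers can share them (the "Move endian adjustment macros" patch in this series). A standalone sketch of why H1(x) is x ^ 7 on a big-endian host, given vectors stored as host-endian 64-bit chunks:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * On a little-endian host, byte element i of a 64-bit chunk already
     * sits at memory offset i, so H1 is the identity.  On a big-endian
     * host the bytes are reversed within each chunk, so H1(x) = x ^ 7
     * maps the logical index to the memory offset.  Sketch only.
     */
    int main(void)
    {
        uint64_t chunk = 0x0706050403020100ull;  /* element i holds i */
        uint8_t mem[8];
        memcpy(mem, &chunk, 8);

        int big_endian = (mem[0] == 0x07);
        for (int i = 0; i < 8; i++) {
            int off = big_endian ? (i ^ 7) : i;
            printf("element %d -> offset %d -> value %d\n", i, off, mem[off]);
        }
        return 0;
    }
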
/* Return a value for NZCV as per the ARM PredTest pseudofunction.
*
* The return value has bit 31 set if N is set, bit 1 set if Z is clear,
@@ -517,9 +502,429 @@ DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR)
DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR)
DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL)
+static inline uint16_t do_sadalp_h(int16_t n, int16_t m)
+{
+ int8_t n1 = n, n2 = n >> 8;
+ return m + n1 + n2;
+}
+
+static inline uint32_t do_sadalp_s(int32_t n, int32_t m)
+{
+ int16_t n1 = n, n2 = n >> 16;
+ return m + n1 + n2;
+}
+
+static inline uint64_t do_sadalp_d(int64_t n, int64_t m)
+{
+ int32_t n1 = n, n2 = n >> 32;
+ return m + n1 + n2;
+}
+
+DO_ZPZZ(sve2_sadalp_zpzz_h, int16_t, H1_2, do_sadalp_h)
+DO_ZPZZ(sve2_sadalp_zpzz_s, int32_t, H1_4, do_sadalp_s)
+DO_ZPZZ_D(sve2_sadalp_zpzz_d, int64_t, do_sadalp_d)
+
+static inline uint16_t do_uadalp_h(uint16_t n, uint16_t m)
+{
+ uint8_t n1 = n, n2 = n >> 8;
+ return m + n1 + n2;
+}
+
+static inline uint32_t do_uadalp_s(uint32_t n, uint32_t m)
+{
+ uint16_t n1 = n, n2 = n >> 16;
+ return m + n1 + n2;
+}
+
+static inline uint64_t do_uadalp_d(uint64_t n, uint64_t m)
+{
+ uint32_t n1 = n, n2 = n >> 32;
+ return m + n1 + n2;
+}
+
+DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h)
+DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s)
+DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d)
+
+#define do_srshl_b(n, m) do_sqrshl_bhs(n, m, 8, true, NULL)
+#define do_srshl_h(n, m) do_sqrshl_bhs(n, m, 16, true, NULL)
+#define do_srshl_s(n, m) do_sqrshl_bhs(n, m, 32, true, NULL)
+#define do_srshl_d(n, m) do_sqrshl_d(n, m, true, NULL)
+
+DO_ZPZZ(sve2_srshl_zpzz_b, int8_t, H1, do_srshl_b)
+DO_ZPZZ(sve2_srshl_zpzz_h, int16_t, H1_2, do_srshl_h)
+DO_ZPZZ(sve2_srshl_zpzz_s, int32_t, H1_4, do_srshl_s)
+DO_ZPZZ_D(sve2_srshl_zpzz_d, int64_t, do_srshl_d)
+
+#define do_urshl_b(n, m) do_uqrshl_bhs(n, (int8_t)m, 8, true, NULL)
+#define do_urshl_h(n, m) do_uqrshl_bhs(n, (int16_t)m, 16, true, NULL)
+#define do_urshl_s(n, m) do_uqrshl_bhs(n, m, 32, true, NULL)
+#define do_urshl_d(n, m) do_uqrshl_d(n, m, true, NULL)
+
+DO_ZPZZ(sve2_urshl_zpzz_b, uint8_t, H1, do_urshl_b)
+DO_ZPZZ(sve2_urshl_zpzz_h, uint16_t, H1_2, do_urshl_h)
+DO_ZPZZ(sve2_urshl_zpzz_s, uint32_t, H1_4, do_urshl_s)
+DO_ZPZZ_D(sve2_urshl_zpzz_d, uint64_t, do_urshl_d)
+
+/*
+ * Unlike the NEON and AdvSIMD versions, there is no QC bit to set.
+ * We pass in a pointer to a dummy saturation field to trigger
+ * the saturating arithmetic but discard the information about
+ * whether it has occurred.
+ */
+#define do_sqshl_b(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, false, &discard); })
+#define do_sqshl_h(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, false, &discard); })
+#define do_sqshl_s(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, false, &discard); })
+#define do_sqshl_d(n, m) \
+ ({ uint32_t discard; do_sqrshl_d(n, m, false, &discard); })
+
+DO_ZPZZ(sve2_sqshl_zpzz_b, int8_t, H1_2, do_sqshl_b)
+DO_ZPZZ(sve2_sqshl_zpzz_h, int16_t, H1_2, do_sqshl_h)
+DO_ZPZZ(sve2_sqshl_zpzz_s, int32_t, H1_4, do_sqshl_s)
+DO_ZPZZ_D(sve2_sqshl_zpzz_d, int64_t, do_sqshl_d)
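
Since SVE2 has no QC bit, the ({ uint32_t discard; ... }) wrappers use a GNU C statement expression to supply the saturation pointer the shared primitives require and then drop the flag. A minimal sketch of the pattern with a hypothetical stand-in primitive; sat_add8 is invented for illustration, not a QEMU helper:

    #include <stdint.h>
    #include <stdio.h>

    /* A saturating primitive that reports saturation through *sat,
     * shaped like do_sqrshl_bhs.  Hypothetical stand-in. */
    static int32_t sat_add8(int32_t n, int32_t m, uint32_t *sat)
    {
        int32_t r = n + m;
        if (r != (int8_t)r) {
            *sat = 1;
            r = r < 0 ? INT8_MIN : INT8_MAX;
        }
        return r;
    }

    /* Compute the flag, then discard it: the statement expression
     * scopes one dummy field per call site. */
    #define do_sat_add8(n, m) \
        ({ uint32_t discard; sat_add8(n, m, &discard); })

    int main(void)
    {
        printf("%d\n", do_sat_add8(100, 100));  /* 127, flag discarded */
        return 0;
    }
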
+
+#define do_uqshl_b(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
+#define do_uqshl_h(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
+#define do_uqshl_s(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, false, &discard); })
+#define do_uqshl_d(n, m) \
+ ({ uint32_t discard; do_uqrshl_d(n, m, false, &discard); })
+
+DO_ZPZZ(sve2_uqshl_zpzz_b, uint8_t, H1_2, do_uqshl_b)
+DO_ZPZZ(sve2_uqshl_zpzz_h, uint16_t, H1_2, do_uqshl_h)
+DO_ZPZZ(sve2_uqshl_zpzz_s, uint32_t, H1_4, do_uqshl_s)
+DO_ZPZZ_D(sve2_uqshl_zpzz_d, uint64_t, do_uqshl_d)
+
+#define do_sqrshl_b(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, true, &discard); })
+#define do_sqrshl_h(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, true, &discard); })
+#define do_sqrshl_s(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, true, &discard); })
+#define do_sqrshl_d(n, m) \
+ ({ uint32_t discard; do_sqrshl_d(n, m, true, &discard); })
+
+DO_ZPZZ(sve2_sqrshl_zpzz_b, int8_t, H1_2, do_sqrshl_b)
+DO_ZPZZ(sve2_sqrshl_zpzz_h, int16_t, H1_2, do_sqrshl_h)
+DO_ZPZZ(sve2_sqrshl_zpzz_s, int32_t, H1_4, do_sqrshl_s)
+DO_ZPZZ_D(sve2_sqrshl_zpzz_d, int64_t, do_sqrshl_d)
+
+#undef do_sqrshl_d
+
+#define do_uqrshl_b(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, true, &discard); })
+#define do_uqrshl_h(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, true, &discard); })
+#define do_uqrshl_s(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, true, &discard); })
+#define do_uqrshl_d(n, m) \
+ ({ uint32_t discard; do_uqrshl_d(n, m, true, &discard); })
+
+DO_ZPZZ(sve2_uqrshl_zpzz_b, uint8_t, H1_2, do_uqrshl_b)
+DO_ZPZZ(sve2_uqrshl_zpzz_h, uint16_t, H1_2, do_uqrshl_h)
+DO_ZPZZ(sve2_uqrshl_zpzz_s, uint32_t, H1_4, do_uqrshl_s)
+DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d)
+
+#undef do_uqrshl_d
+
+#define DO_HADD_BHS(n, m) (((int64_t)n + m) >> 1)
+#define DO_HADD_D(n, m) ((n >> 1) + (m >> 1) + (n & m & 1))
+
+DO_ZPZZ(sve2_shadd_zpzz_b, int8_t, H1, DO_HADD_BHS)
+DO_ZPZZ(sve2_shadd_zpzz_h, int16_t, H1_2, DO_HADD_BHS)
+DO_ZPZZ(sve2_shadd_zpzz_s, int32_t, H1_4, DO_HADD_BHS)
+DO_ZPZZ_D(sve2_shadd_zpzz_d, int64_t, DO_HADD_D)
+
+DO_ZPZZ(sve2_uhadd_zpzz_b, uint8_t, H1, DO_HADD_BHS)
+DO_ZPZZ(sve2_uhadd_zpzz_h, uint16_t, H1_2, DO_HADD_BHS)
+DO_ZPZZ(sve2_uhadd_zpzz_s, uint32_t, H1_4, DO_HADD_BHS)
+DO_ZPZZ_D(sve2_uhadd_zpzz_d, uint64_t, DO_HADD_D)
+
+#define DO_RHADD_BHS(n, m) (((int64_t)n + m + 1) >> 1)
+#define DO_RHADD_D(n, m) ((n >> 1) + (m >> 1) + ((n | m) & 1))
+
+DO_ZPZZ(sve2_srhadd_zpzz_b, int8_t, H1, DO_RHADD_BHS)
+DO_ZPZZ(sve2_srhadd_zpzz_h, int16_t, H1_2, DO_RHADD_BHS)
+DO_ZPZZ(sve2_srhadd_zpzz_s, int32_t, H1_4, DO_RHADD_BHS)
+DO_ZPZZ_D(sve2_srhadd_zpzz_d, int64_t, DO_RHADD_D)
+
+DO_ZPZZ(sve2_urhadd_zpzz_b, uint8_t, H1, DO_RHADD_BHS)
+DO_ZPZZ(sve2_urhadd_zpzz_h, uint16_t, H1_2, DO_RHADD_BHS)
+DO_ZPZZ(sve2_urhadd_zpzz_s, uint32_t, H1_4, DO_RHADD_BHS)
+DO_ZPZZ_D(sve2_urhadd_zpzz_d, uint64_t, DO_RHADD_D)
+
+#define DO_HSUB_BHS(n, m) (((int64_t)n - m) >> 1)
+#define DO_HSUB_D(n, m) ((n >> 1) - (m >> 1) - (~n & m & 1))
+
+DO_ZPZZ(sve2_shsub_zpzz_b, int8_t, H1, DO_HSUB_BHS)
+DO_ZPZZ(sve2_shsub_zpzz_h, int16_t, H1_2, DO_HSUB_BHS)
+DO_ZPZZ(sve2_shsub_zpzz_s, int32_t, H1_4, DO_HSUB_BHS)
+DO_ZPZZ_D(sve2_shsub_zpzz_d, int64_t, DO_HSUB_D)
+
+DO_ZPZZ(sve2_uhsub_zpzz_b, uint8_t, H1, DO_HSUB_BHS)
+DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS)
+DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS)
+DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
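
For the _d forms the intermediate cannot be widened, so the halving identities avoid overflow: (n + m) >> 1 equals (n >> 1) + (m >> 1) + (n & m & 1), with analogous forms for the rounding add and the subtract. A quick exhaustive check of all three against the widened reference over every 8-bit operand pair:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (int n = -128; n < 128; n++) {
            for (int m = -128; m < 128; m++) {
                int8_t a = n, b = m;
                int8_t hadd  = (a >> 1) + (b >> 1) + (a & b & 1);
                int8_t rhadd = (a >> 1) + (b >> 1) + ((a | b) & 1);
                int8_t hsub  = (a >> 1) - (b >> 1) - (~a & b & 1);
                if (hadd  != (int8_t)((n + m) >> 1) ||
                    rhadd != (int8_t)((n + m + 1) >> 1) ||
                    hsub  != (int8_t)((n - m) >> 1)) {
                    printf("mismatch at n=%d m=%d\n", n, m);
                    return 1;
                }
            }
        }
        printf("all 65536 pairs match\n");
        return 0;
    }
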
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max)
+{
+ return val >= max ? max : val <= min ? min : val;
+}
+
+#define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX)
+#define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX)
+#define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_sqadd_d(int64_t n, int64_t m)
+{
+ int64_t r = n + m;
+ if (((r ^ n) & ~(n ^ m)) < 0) {
+ /* Signed overflow. */
+ return r < 0 ? INT64_MAX : INT64_MIN;
+ }
+ return r;
+}
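
The bit test in do_sqadd_d reads: saturate iff n and m share a sign (bit 63 of ~(n ^ m) set) while r's sign differs from n's (bit 63 of r ^ n set). Restated standalone, with the wraparound add made explicit so the sketch stays within standard C:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t sqadd64(int64_t n, int64_t m)
    {
        /* Wraparound add; QEMU builds get the same via -fwrapv. */
        int64_t r = (int64_t)((uint64_t)n + (uint64_t)m);
        if (((r ^ n) & ~(n ^ m)) < 0) {
            return r < 0 ? INT64_MAX : INT64_MIN;
        }
        return r;
    }

    int main(void)
    {
        printf("%lld\n", (long long)sqadd64(INT64_MAX, 1));  /* saturates high */
        printf("%lld\n", (long long)sqadd64(INT64_MIN, -1)); /* saturates low */
        printf("%lld\n", (long long)sqadd64(-5, 3));         /* -2 */
        return 0;
    }
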
+
+DO_ZPZZ(sve2_sqadd_zpzz_b, int8_t, H1, DO_SQADD_B)
+DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H)
+DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S)
+DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d)
+
+#define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX)
+#define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX)
+#define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX)
+
+static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m)
+{
+ uint64_t r = n + m;
+ return r < n ? UINT64_MAX : r;
+}
+
+DO_ZPZZ(sve2_uqadd_zpzz_b, uint8_t, H1, DO_UQADD_B)
+DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H)
+DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S)
+DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d)
+
+#define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX)
+#define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX)
+#define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_sqsub_d(int64_t n, int64_t m)
+{
+ int64_t r = n - m;
+ if (((r ^ n) & (n ^ m)) < 0) {
+ /* Signed overflow. */
+ return r < 0 ? INT64_MAX : INT64_MIN;
+ }
+ return r;
+}
+
+DO_ZPZZ(sve2_sqsub_zpzz_b, int8_t, H1, DO_SQSUB_B)
+DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H)
+DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S)
+DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d)
+
+#define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX)
+#define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX)
+#define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX)
+
+static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m)
+{
+ return n > m ? n - m : 0;
+}
+
+DO_ZPZZ(sve2_uqsub_zpzz_b, uint8_t, H1, DO_UQSUB_B)
+DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H)
+DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S)
+DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d)
+
+#define DO_SUQADD_B(n, m) \
+ do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX)
+#define DO_SUQADD_H(n, m) \
+ do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX)
+#define DO_SUQADD_S(n, m) \
+ do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_suqadd_d(int64_t n, uint64_t m)
+{
+ uint64_t r = n + m;
+
+ if (n < 0) {
+ /* Note that m - abs(n) cannot underflow. */
+ if (r > INT64_MAX) {
+ /* Result is either a very large positive value or negative. */
+ if (m > -n) {
+ /* m > abs(n), so r is a very large positive value. */
+ return INT64_MAX;
+ }
+ /* Result is negative. */
+ }
+ } else {
+ /* Both inputs are positive: check for overflow. */
+ if (r < m || r > INT64_MAX) {
+ return INT64_MAX;
+ }
+ }
+ return r;
+}
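
A few representative cases for the signed-plus-unsigned saturation above, as a standalone sanity sketch (values invented; -(uint64_t)n negates in the unsigned domain so abs(INT64_MIN) is well defined):

    #include <stdint.h>
    #include <stdio.h>

    /* Same case analysis as do_suqadd_d above, restated. */
    static int64_t suqadd64(int64_t n, uint64_t m)
    {
        uint64_t r = n + m;
        if (n < 0) {
            if (r > INT64_MAX && m > -(uint64_t)n) {
                return INT64_MAX;   /* m > abs(n): huge positive result */
            }
            /* otherwise in-range, possibly negative */
        } else if (r < m || r > INT64_MAX) {
            return INT64_MAX;       /* nonnegative + unsigned overflowed */
        }
        return r;                   /* two's-complement conversion */
    }

    int main(void)
    {
        printf("%lld\n", (long long)suqadd64(-1, UINT64_MAX));  /* INT64_MAX */
        printf("%lld\n", (long long)suqadd64(-10, 3));          /* -7 */
        printf("%lld\n", (long long)suqadd64(INT64_MAX, 1));    /* INT64_MAX */
        return 0;
    }
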
+
+DO_ZPZZ(sve2_suqadd_zpzz_b, uint8_t, H1, DO_SUQADD_B)
+DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H)
+DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S)
+DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d)
+
+#define DO_USQADD_B(n, m) \
+ do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX)
+#define DO_USQADD_H(n, m) \
+ do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX)
+#define DO_USQADD_S(n, m) \
+ do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX)
+
+static inline uint64_t do_usqadd_d(uint64_t n, int64_t m)
+{
+ uint64_t r = n + m;
+
+ if (m < 0) {
+ return n < -m ? 0 : r;
+ }
+ return r < n ? UINT64_MAX : r;
+}
+
+DO_ZPZZ(sve2_usqadd_zpzz_b, uint8_t, H1, DO_USQADD_B)
+DO_ZPZZ(sve2_usqadd_zpzz_h, uint16_t, H1_2, DO_USQADD_H)
+DO_ZPZZ(sve2_usqadd_zpzz_s, uint32_t, H1_4, DO_USQADD_S)
+DO_ZPZZ_D(sve2_usqadd_zpzz_d, uint64_t, do_usqadd_d)
+
#undef DO_ZPZZ
#undef DO_ZPZZ_D
+/*
+ * Three-operand expander, operating on element pairs.
+ * If the slot I is even, the elements are from VN {I, I+1}.
+ * If the slot I is odd, the elements are from VM {I-1, I}.
+ * Load all of the input elements in each pair before overwriting output.
+ */
+#define DO_ZPZZ_PAIR(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ TYPE n0 = *(TYPE *)(vn + H(i)); \
+ TYPE m0 = *(TYPE *)(vm + H(i)); \
+ TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(n0, n1); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(m0, m1); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZZ_PAIR_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 2) { \
+ TYPE n0 = n[i], n1 = n[i + 1]; \
+ TYPE m0 = m[i], m1 = m[i + 1]; \
+ if (pg[H1(i)] & 1) { \
+ d[i] = OP(n0, n1); \
+ } \
+ if (pg[H1(i + 1)] & 1) { \
+ d[i + 1] = OP(m0, m1); \
+ } \
+ } \
+}
+
+DO_ZPZZ_PAIR(sve2_addp_zpzz_b, uint8_t, H1, DO_ADD)
+DO_ZPZZ_PAIR(sve2_addp_zpzz_h, uint16_t, H1_2, DO_ADD)
+DO_ZPZZ_PAIR(sve2_addp_zpzz_s, uint32_t, H1_4, DO_ADD)
+DO_ZPZZ_PAIR_D(sve2_addp_zpzz_d, uint64_t, DO_ADD)
+
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_b, uint8_t, H1, DO_MAX)
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_h, uint16_t, H1_2, DO_MAX)
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_s, uint32_t, H1_4, DO_MAX)
+DO_ZPZZ_PAIR_D(sve2_umaxp_zpzz_d, uint64_t, DO_MAX)
+
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_b, uint8_t, H1, DO_MIN)
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_h, uint16_t, H1_2, DO_MIN)
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_s, uint32_t, H1_4, DO_MIN)
+DO_ZPZZ_PAIR_D(sve2_uminp_zpzz_d, uint64_t, DO_MIN)
+
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_b, int8_t, H1, DO_MAX)
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_h, int16_t, H1_2, DO_MAX)
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_s, int32_t, H1_4, DO_MAX)
+DO_ZPZZ_PAIR_D(sve2_smaxp_zpzz_d, int64_t, DO_MAX)
+
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_b, int8_t, H1, DO_MIN)
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_h, int16_t, H1_2, DO_MIN)
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_s, int32_t, H1_4, DO_MIN)
+DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
+
+#undef DO_ZPZZ_PAIR
+#undef DO_ZPZZ_PAIR_D
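
To make the pairing rule concrete: even result slots combine a pair from VN, odd slots the co-located pair from VM, and both inputs of each pair are read before either output is written. A scalar model of an 8-element ADDP with made-up values:

    #include <stdio.h>

    int main(void)
    {
        int n[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        int m[8] = {10, 20, 30, 40, 50, 60, 70, 80};
        int d[8];

        for (int i = 0; i < 8; i += 2) {
            int n0 = n[i], n1 = n[i + 1];   /* read both pairs first */
            int m0 = m[i], m1 = m[i + 1];
            d[i]     = n0 + n1;             /* even slot: pair from N */
            d[i + 1] = m0 + m1;             /* odd slot: pair from M */
        }
        for (int i = 0; i < 8; i++) {
            printf("%d ", d[i]);            /* 3 30 7 70 11 110 15 150 */
        }
        printf("\n");
        return 0;
    }
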
+
+#define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ TYPE n0 = *(TYPE *)(vn + H(i)); \
+ TYPE m0 = *(TYPE *)(vm + H(i)); \
+ TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(n0, n1, status); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(m0, m1, status); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_h, float16, H1_2, float16_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_s, float32, H1_4, float32_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_d, float64, , float64_add)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_h, float16, H1_2, float16_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_s, float32, H1_4, float32_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_d, float64, , float64_maxnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_h, float16, H1_2, float16_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_s, float32, H1_4, float32_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_d, float64, , float64_minnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_h, float16, H1_2, float16_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_s, float32, H1_4, float32_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_d, float64, , float64_max)
+
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_h, float16, H1_2, float16_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_s, float32, H1_4, float32_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_d, float64, , float64_min)
+
+#undef DO_ZPZZ_PAIR_FP
+
/* Three-operand expander, controlled by a predicate, in which the
* third operand is "wide". That is, for D = N op M, the same 64-bit
* value of M is used with all of the narrower values of N.
@@ -684,6 +1089,27 @@ DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64)
+#define DO_SQABS(X) \
+ ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
+ x_ >= 0 ? x_ : x_ == min_ ? -min_ - 1 : -x_; })
+
+DO_ZPZ(sve2_sqabs_b, int8_t, H1, DO_SQABS)
+DO_ZPZ(sve2_sqabs_h, int16_t, H1_2, DO_SQABS)
+DO_ZPZ(sve2_sqabs_s, int32_t, H1_4, DO_SQABS)
+DO_ZPZ_D(sve2_sqabs_d, int64_t, DO_SQABS)
+
+#define DO_SQNEG(X) \
+ ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
+ x_ == min_ ? -min_ - 1 : -x_; })
+
+DO_ZPZ(sve2_sqneg_b, uint8_t, H1, DO_SQNEG)
+DO_ZPZ(sve2_sqneg_h, uint16_t, H1_2, DO_SQNEG)
+DO_ZPZ(sve2_sqneg_s, uint32_t, H1_4, DO_SQNEG)
+DO_ZPZ_D(sve2_sqneg_d, uint64_t, DO_SQNEG)
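
DO_SQABS and DO_SQNEG pin the one overflowing input: negating the type's minimum saturates to its maximum (the macros' -min_ - 1 relies on the wrapping arithmetic QEMU builds with). An 8-bit restatement that stays within standard C:

    #include <stdint.h>
    #include <stdio.h>

    static int8_t sqneg8(int8_t x)
    {
        return x == INT8_MIN ? INT8_MAX : -x;
    }

    static int8_t sqabs8(int8_t x)
    {
        return x < 0 ? sqneg8(x) : x;
    }

    int main(void)
    {
        printf("%d %d\n", sqabs8(-128), sqneg8(-128));  /* 127 127 */
        printf("%d %d\n", sqabs8(-5), sqneg8(5));       /* 5 -5 */
        return 0;
    }
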
+
+DO_ZPZ(sve2_urecpe_s, uint32_t, H1_4, helper_recpe_u32)
+DO_ZPZ(sve2_ursqrte_s, uint32_t, H1_4, helper_rsqrte_u32)
+
/* Three-operand expander, unpredicated, in which the third operand is "wide".
*/
#define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \
@@ -726,6 +1152,709 @@ DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL)
#undef DO_ZPZ
#undef DO_ZPZ_D
+/*
+ * Three-operand expander, unpredicated, in which the two inputs are
+ * selected from the top or bottom half of the wide column.
+ */
+#define DO_ZZZ_TB(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
+ } \
+}
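
sel1 and sel2 are 0 or sizeof(TYPEN), so each wide lane reads either the bottom (even) or top (odd) narrow element of its own wide column. A scalar model contrasting SADDLB and SADDLT on bytes widened to halfwords, with illustrative inputs:

    #include <stdint.h>
    #include <stdio.h>

    /* Wide lane j covers narrow elements {2j, 2j+1}; 'bottom' ops
     * read element 2j, 'top' ops read element 2j + 1. */
    static void saddl(int16_t *d, const int8_t *n, const int8_t *m,
                      int lanes, int top)
    {
        for (int j = 0; j < lanes; j++) {
            d[j] = (int16_t)n[2 * j + top] + m[2 * j + top];
        }
    }

    int main(void)
    {
        int8_t n[4] = {1, 100, -2, 100};
        int8_t m[4] = {3, 100, -4, 100};
        int16_t b[2], t[2];

        saddl(b, n, m, 2, 0);   /* SADDLB: {4, -6} */
        saddl(t, n, m, 2, 1);   /* SADDLT: {200, 200}, no int8 overflow */
        printf("%d %d | %d %d\n", b[0], b[1], t[0], t[1]);
        return 0;
    }
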
+
+DO_ZZZ_TB(sve2_saddl_h, int16_t, int8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_TB(sve2_saddl_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_TB(sve2_saddl_d, int64_t, int32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_TB(sve2_ssubl_h, int16_t, int8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_TB(sve2_ssubl_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_TB(sve2_ssubl_d, int64_t, int32_t, , H1_4, DO_SUB)
+
+DO_ZZZ_TB(sve2_sabdl_h, int16_t, int8_t, H1_2, H1, DO_ABD)
+DO_ZZZ_TB(sve2_sabdl_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZ_TB(sve2_sabdl_d, int64_t, int32_t, , H1_4, DO_ABD)
+
+DO_ZZZ_TB(sve2_uaddl_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_TB(sve2_uaddl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_TB(sve2_uaddl_d, uint64_t, uint32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_TB(sve2_usubl_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_TB(sve2_usubl_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_TB(sve2_usubl_d, uint64_t, uint32_t, , H1_4, DO_SUB)
+
+DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
+DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, , H1_4, DO_ABD)
+
+DO_ZZZ_TB(sve2_smull_zzz_h, int16_t, int8_t, H1_2, H1, DO_MUL)
+DO_ZZZ_TB(sve2_smull_zzz_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZ_TB(sve2_smull_zzz_d, int64_t, int32_t, , H1_4, DO_MUL)
+
+DO_ZZZ_TB(sve2_umull_zzz_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
+DO_ZZZ_TB(sve2_umull_zzz_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZ_TB(sve2_umull_zzz_d, uint64_t, uint32_t, , H1_4, DO_MUL)
+
+/* Note that the multiply cannot overflow, but the doubling can. */
+static inline int16_t do_sqdmull_h(int16_t n, int16_t m)
+{
+ int16_t val = n * m;
+ return DO_SQADD_H(val, val);
+}
+
+static inline int32_t do_sqdmull_s(int32_t n, int32_t m)
+{
+ int32_t val = n * m;
+ return DO_SQADD_S(val, val);
+}
+
+static inline int64_t do_sqdmull_d(int64_t n, int64_t m)
+{
+ int64_t val = n * m;
+ return do_sqadd_d(val, val);
+}
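
The note holds because two sign-extended N/2-bit inputs multiply to at most 2^(N-2) in magnitude, which fits the N-bit type; only the doubling can saturate. The _h case restated with a worked pair of inputs:

    #include <stdint.h>
    #include <stdio.h>

    static int16_t sqdmull16(int8_t n8, int8_t m8)
    {
        int16_t val = (int16_t)n8 * m8;     /* |val| <= 128 * 128 = 16384 */
        int32_t sum = (int32_t)val + val;   /* doubling may overflow */
        if (sum != (int16_t)sum) {
            return sum < 0 ? INT16_MIN : INT16_MAX;
        }
        return sum;
    }

    int main(void)
    {
        printf("%d\n", sqdmull16(-128, -128));  /* 32768 -> 32767 */
        printf("%d\n", sqdmull16(100, -100));   /* -20000, in range */
        return 0;
    }
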
+
+DO_ZZZ_TB(sve2_sqdmull_zzz_h, int16_t, int8_t, H1_2, H1, do_sqdmull_h)
+DO_ZZZ_TB(sve2_sqdmull_zzz_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
+DO_ZZZ_TB(sve2_sqdmull_zzz_d, int64_t, int32_t, , H1_4, do_sqdmull_d)
+
+#undef DO_ZZZ_TB
+
+#define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sel2 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
+ } \
+}
+
+DO_ZZZ_WTB(sve2_saddw_h, int16_t, int8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_WTB(sve2_saddw_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_WTB(sve2_saddw_d, int64_t, int32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_WTB(sve2_ssubw_h, int16_t, int8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_WTB(sve2_ssubw_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_WTB(sve2_ssubw_d, int64_t, int32_t, , H1_4, DO_SUB)
+
+DO_ZZZ_WTB(sve2_uaddw_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_WTB(sve2_uaddw_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_WTB(sve2_uaddw_d, uint64_t, uint32_t, , H1_4, DO_ADD)
+
+DO_ZZZ_WTB(sve2_usubw_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_WTB(sve2_usubw_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, , H1_4, DO_SUB)
+
+#undef DO_ZZZ_WTB
+
+#define DO_ZZZ_NTB(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPE); \
+ intptr_t sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPE); \
+ for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + H(i + sel1)); \
+ TYPE mm = *(TYPE *)(vm + H(i + sel2)); \
+ *(TYPE *)(vd + H(i + sel1)) = OP(nn, mm); \
+ } \
+}
+
+DO_ZZZ_NTB(sve2_eoril_b, uint8_t, H1, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_h, uint16_t, H1_2, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_s, uint32_t, H1_4, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_d, uint64_t, , DO_EOR)
+
+#undef DO_ZZZ_NTB
+
+#define DO_ZZZW_ACC(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel1 = simd_data(desc) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel1)); \
+ TYPEW aa = *(TYPEW *)(va + HW(i)); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, mm) + aa; \
+ } \
+}
+
+DO_ZZZW_ACC(sve2_sabal_h, int16_t, int8_t, H1_2, H1, DO_ABD)
+DO_ZZZW_ACC(sve2_sabal_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZW_ACC(sve2_sabal_d, int64_t, int32_t, , H1_4, DO_ABD)
+
+DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
+DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, , H1_4, DO_ABD)
+
+DO_ZZZW_ACC(sve2_smlal_zzzw_h, int16_t, int8_t, H1_2, H1, DO_MUL)
+DO_ZZZW_ACC(sve2_smlal_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZW_ACC(sve2_smlal_zzzw_d, int64_t, int32_t, , H1_4, DO_MUL)
+
+DO_ZZZW_ACC(sve2_umlal_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
+DO_ZZZW_ACC(sve2_umlal_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZW_ACC(sve2_umlal_zzzw_d, uint64_t, uint32_t, , H1_4, DO_MUL)
+
+#define DO_NMUL(N, M) -(N * M)
+
+DO_ZZZW_ACC(sve2_smlsl_zzzw_h, int16_t, int8_t, H1_2, H1, DO_NMUL)
+DO_ZZZW_ACC(sve2_smlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_NMUL)
+DO_ZZZW_ACC(sve2_smlsl_zzzw_d, int64_t, int32_t, , H1_4, DO_NMUL)
+
+DO_ZZZW_ACC(sve2_umlsl_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_NMUL)
+DO_ZZZW_ACC(sve2_umlsl_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_NMUL)
+DO_ZZZW_ACC(sve2_umlsl_zzzw_d, uint64_t, uint32_t, , H1_4, DO_NMUL)
+
+#undef DO_ZZZW_ACC
+
+#define DO_XTNB(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ nn = OP(nn) & MAKE_64BIT_MASK(0, sizeof(TYPE) * 4); \
+ *(TYPE *)(vd + i) = nn; \
+ } \
+}
+
+#define DO_XTNT(NAME, TYPE, TYPEN, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc), odd = H(sizeof(TYPEN)); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ *(TYPEN *)(vd + i + odd) = OP(nn); \
+ } \
+}
+
+#define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX)
+#define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX)
+#define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX)
+
+DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H)
+DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S)
+DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D)
+
+DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H)
+DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S)
+DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D)
+
+#define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX)
+#define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX)
+#define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX)
+
+DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H)
+DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S)
+DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D)
+
+DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H)
+DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S)
+DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D)
+
+#undef DO_XTNB
+#undef DO_XTNT
+
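+/*
+ * ADCLB/ADCLT and SBCLB/SBCLT: sel picks the bottom or top narrow
+ * element of each wide lane of N, and inv is all-ones for the
+ * subtract forms so that E2 arrives inverted.  The carry-in is taken
+ * from bit 0 of the odd (top) element of M, and the carry-out is
+ * produced in the same position of the result, ready to be chained.
+ */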
+void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int sel = H4(extract32(desc, SIMD_DATA_SHIFT, 1));
+ uint32_t inv = -extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t *a = va, *n = vn;
+ uint64_t *d = vd, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ uint32_t e1 = a[2 * i + H4(0)];
+ uint32_t e2 = n[2 * i + sel] ^ inv;
+ uint64_t c = extract64(m[i], 32, 1);
+ /* Compute and store the entire 33-bit result at once. */
+ d[i] = c + e1 + e2;
+ }
+}
+
+void HELPER(sve2_adcl_d)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int sel = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t inv = -(uint64_t)extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint64_t *d = vd, *a = va, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; i += 2) {
+ Int128 e1 = int128_make64(a[i]);
+ Int128 e2 = int128_make64(n[i + sel] ^ inv);
+ Int128 c = int128_make64(m[i + 1] & 1);
+ Int128 r = int128_add(int128_add(e1, e2), c);
+ d[i + 0] = int128_getlo(r);
+ d[i + 1] = int128_gethi(r);
+ }
+}
+
+#define DO_SQDMLAL(NAME, TYPEW, TYPEN, HW, HN, DMUL_OP, SUM_OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+ TYPEW aa = *(TYPEW *)(va + HW(i)); \
+ *(TYPEW *)(vd + HW(i)) = SUM_OP(aa, DMUL_OP(nn, mm)); \
+ } \
+}
+
+DO_SQDMLAL(sve2_sqdmlal_zzzw_h, int16_t, int8_t, H1_2, H1,
+ do_sqdmull_h, DO_SQADD_H)
+DO_SQDMLAL(sve2_sqdmlal_zzzw_s, int32_t, int16_t, H1_4, H1_2,
+ do_sqdmull_s, DO_SQADD_S)
+DO_SQDMLAL(sve2_sqdmlal_zzzw_d, int64_t, int32_t, , H1_4,
+ do_sqdmull_d, do_sqadd_d)
+
+DO_SQDMLAL(sve2_sqdmlsl_zzzw_h, int16_t, int8_t, H1_2, H1,
+ do_sqdmull_h, DO_SQSUB_H)
+DO_SQDMLAL(sve2_sqdmlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2,
+ do_sqdmull_s, DO_SQSUB_S)
+DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t, , H1_4,
+ do_sqdmull_d, do_sqsub_d)
+
+#undef DO_SQDMLAL
+
+#define DO_CMLA_FUNC(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \
+ int rot = simd_data(desc); \
+ int sel_a = rot & 1, sel_b = sel_a ^ 1; \
+ bool sub_r = rot == 1 || rot == 2; \
+ bool sub_i = rot >= 2; \
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
+ for (i = 0; i < opr_sz; i += 2) { \
+ TYPE elt1_a = n[H(i + sel_a)]; \
+ TYPE elt2_a = m[H(i + sel_a)]; \
+ TYPE elt2_b = m[H(i + sel_b)]; \
+ d[H(i)] = OP(elt1_a, elt2_a, a[H(i)], sub_r); \
+ d[H(i + 1)] = OP(elt1_a, elt2_b, a[H(i + 1)], sub_i); \
+ } \
+}
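+
+/*
+ * rot encodes the rotation: 0, 90, 180 or 270 degrees.  sel_a picks
+ * the even (real) or odd (imaginary) element of N that feeds both
+ * products, and sub_r/sub_i supply the signs: e.g. rot == 1 (90)
+ * gives d_r = a_r - n_i * m_i and d_i = a_i + n_i * m_r.
+ */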
+
+#define DO_CMLA(N, M, A, S) (A + (N * M) * (S ? -1 : 1))
+
+DO_CMLA_FUNC(sve2_cmla_zzzz_b, uint8_t, H1, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_h, uint16_t, H2, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_s, uint32_t, H4, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_d, uint64_t, , DO_CMLA)
+
+#define DO_SQRDMLAH_B(N, M, A, S) \
+ do_sqrdmlah_b(N, M, A, S, true)
+#define DO_SQRDMLAH_H(N, M, A, S) \
+ ({ uint32_t discard; do_sqrdmlah_h(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_S(N, M, A, S) \
+ ({ uint32_t discard; do_sqrdmlah_s(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_D(N, M, A, S) \
+ do_sqrdmlah_d(N, M, A, S, true)
+
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_b, int8_t, H1, DO_SQRDMLAH_B)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, , DO_SQRDMLAH_D)
+
+#define DO_CMLA_IDX_FUNC(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2); \
+ int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2) * 2; \
+ int sel_a = rot & 1, sel_b = sel_a ^ 1; \
+ bool sub_r = rot == 1 || rot == 2; \
+ bool sub_i = rot >= 2; \
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += 16 / sizeof(TYPE)) { \
+ TYPE elt2_a = m[H(i + idx + sel_a)]; \
+ TYPE elt2_b = m[H(i + idx + sel_b)]; \
+ for (j = 0; j < 16 / sizeof(TYPE); j += 2) { \
+ TYPE elt1_a = n[H(i + j + sel_a)]; \
+ d[H(i + j)] = OP(elt1_a, elt2_a, a[H(i + j)], sub_r); \
+ d[H(i + j + 1)] = OP(elt1_a, elt2_b, a[H(i + j + 1)], sub_i); \
+ } \
+ } \
+}
+
+DO_CMLA_IDX_FUNC(sve2_cmla_idx_h, int16_t, H2, DO_CMLA)
+DO_CMLA_IDX_FUNC(sve2_cmla_idx_s, int32_t, H4, DO_CMLA)
+
+DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
+
+#undef DO_CMLA
+#undef DO_CMLA_FUNC
+#undef DO_CMLA_IDX_FUNC
+#undef DO_SQRDMLAH_B
+#undef DO_SQRDMLAH_H
+#undef DO_SQRDMLAH_S
+#undef DO_SQRDMLAH_D
+
+/* Note: N and M each bundle 4 narrow elements (2 complex pairs) into one word. */
+static int32_t do_cdot_s(uint32_t n, uint32_t m, int32_t a,
+ int sel_a, int sel_b, int sub_i)
+{
+ for (int i = 0; i <= 1; i++) {
+ int32_t elt1_r = (int8_t)(n >> (16 * i));
+ int32_t elt1_i = (int8_t)(n >> (16 * i + 8));
+ int32_t elt2_a = (int8_t)(m >> (16 * i + 8 * sel_a));
+ int32_t elt2_b = (int8_t)(m >> (16 * i + 8 * sel_b));
+
+ a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i;
+ }
+ return a;
+}
+
+static int64_t do_cdot_d(uint64_t n, uint64_t m, int64_t a,
+ int sel_a, int sel_b, int sub_i)
+{
+ for (int i = 0; i <= 1; i++) {
+ int64_t elt1_r = (int16_t)(n >> (32 * i + 0));
+ int64_t elt1_i = (int16_t)(n >> (32 * i + 16));
+ int64_t elt2_a = (int16_t)(m >> (32 * i + 16 * sel_a));
+ int64_t elt2_b = (int16_t)(m >> (32 * i + 16 * sel_b));
+
+ a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i;
+ }
+ return a;
+}
+
+void HELPER(sve2_cdot_zzzz_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int rot = simd_data(desc);
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint32_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (int e = 0; e < opr_sz / 4; e++) {
+ d[e] = do_cdot_s(n[e], m[e], a[e], sel_a, sel_b, sub_i);
+ }
+}
+
+void HELPER(sve2_cdot_zzzz_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int rot = simd_data(desc);
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (int e = 0; e < opr_sz / 8; e++) {
+ d[e] = do_cdot_d(n[e], m[e], a[e], sel_a, sel_b, sub_i);
+ }
+}
+
+void HELPER(sve2_cdot_idx_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2);
+ int idx = H4(extract32(desc, SIMD_DATA_SHIFT + 2, 2));
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint32_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (int seg = 0; seg < opr_sz / 4; seg += 4) {
+ uint32_t seg_m = m[seg + idx];
+ for (int e = 0; e < 4; e++) {
+ d[seg + e] = do_cdot_s(n[seg + e], seg_m, a[seg + e],
+ sel_a, sel_b, sub_i);
+ }
+ }
+}
+
+void HELPER(sve2_cdot_idx_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int seg, opr_sz = simd_oprsz(desc);
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2);
+ int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (seg = 0; seg < opr_sz / 8; seg += 2) {
+ uint64_t seg_m = m[seg + idx];
+ for (int e = 0; e < 2; e++) {
+ d[seg + e] = do_cdot_d(n[seg + e], seg_m, a[seg + e],
+ sel_a, sel_b, sub_i);
+ }
+ }
+}
+
+#define DO_ZZXZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
+ intptr_t i, j, idx = simd_data(desc); \
+ TYPE *d = vd, *a = va, *n = vn, *m = (TYPE *)vm + H(idx); \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[i]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = OP(n[i + j], mm, a[i + j]); \
+ } \
+ } \
+}
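+
+/*
+ * Indexed multiply-accumulate: the element of M at "idx" within each
+ * 128-bit segment is broadcast against all elements of N in that
+ * segment.
+ */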
+
+#define DO_SQRDMLAH_H(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_h(N, M, A, false, true, &discard); })
+#define DO_SQRDMLAH_S(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_s(N, M, A, false, true, &discard); })
+#define DO_SQRDMLAH_D(N, M, A) do_sqrdmlah_d(N, M, A, false, true)
+
+DO_ZZXZ(sve2_sqrdmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_ZZXZ(sve2_sqrdmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
+DO_ZZXZ(sve2_sqrdmlah_idx_d, int64_t, , DO_SQRDMLAH_D)
+
+#define DO_SQRDMLSH_H(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_h(N, M, A, true, true, &discard); })
+#define DO_SQRDMLSH_S(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_s(N, M, A, true, true, &discard); })
+#define DO_SQRDMLSH_D(N, M, A) do_sqrdmlah_d(N, M, A, true, true)
+
+DO_ZZXZ(sve2_sqrdmlsh_idx_h, int16_t, H2, DO_SQRDMLSH_H)
+DO_ZZXZ(sve2_sqrdmlsh_idx_s, int32_t, H4, DO_SQRDMLSH_S)
+DO_ZZXZ(sve2_sqrdmlsh_idx_d, int64_t, , DO_SQRDMLSH_D)
+
+#undef DO_ZZXZ
+
+#define DO_ZZXW(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
+ for (i = 0; i < oprsz; i += 16) { \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \
+ for (j = 0; j < 16; j += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
+ TYPEW aa = *(TYPEW *)(va + HW(i + j)); \
+ *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm, aa); \
+ } \
+ } \
+}
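+
+/*
+ * Indexed widening multiply-accumulate: sel selects the bottom or top
+ * narrow half of each wide lane of N, and idx the narrow element of M
+ * broadcast within each 128-bit segment.
+ */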
+
+#define DO_MLA(N, M, A) (A + N * M)
+
+DO_ZZXW(sve2_smlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLA)
+DO_ZZXW(sve2_smlal_idx_d, int64_t, int32_t, , H1_4, DO_MLA)
+DO_ZZXW(sve2_umlal_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLA)
+DO_ZZXW(sve2_umlal_idx_d, uint64_t, uint32_t, , H1_4, DO_MLA)
+
+#define DO_MLS(N, M, A) (A - N * M)
+
+DO_ZZXW(sve2_smlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLS)
+DO_ZZXW(sve2_smlsl_idx_d, int64_t, int32_t, , H1_4, DO_MLS)
+DO_ZZXW(sve2_umlsl_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLS)
+DO_ZZXW(sve2_umlsl_idx_d, uint64_t, uint32_t, , H1_4, DO_MLS)
+
+#define DO_SQDMLAL_S(N, M, A) DO_SQADD_S(A, do_sqdmull_s(N, M))
+#define DO_SQDMLAL_D(N, M, A) do_sqadd_d(A, do_sqdmull_d(N, M))
+
+DO_ZZXW(sve2_sqdmlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLAL_S)
+DO_ZZXW(sve2_sqdmlal_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLAL_D)
+
+#define DO_SQDMLSL_S(N, M, A) DO_SQSUB_S(A, do_sqdmull_s(N, M))
+#define DO_SQDMLSL_D(N, M, A) do_sqsub_d(A, do_sqdmull_d(N, M))
+
+DO_ZZXW(sve2_sqdmlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLSL_S)
+DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLSL_D)
+
+#undef DO_MLA
+#undef DO_MLS
+#undef DO_ZZXW
+
+#define DO_ZZX(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
+ for (i = 0; i < oprsz; i += 16) { \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \
+ for (j = 0; j < 16; j += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
+ *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm); \
+ } \
+ } \
+}
+
+DO_ZZX(sve2_sqdmull_idx_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
+DO_ZZX(sve2_sqdmull_idx_d, int64_t, int32_t, , H1_4, do_sqdmull_d)
+
+DO_ZZX(sve2_smull_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZX(sve2_smull_idx_d, int64_t, int32_t, , H1_4, DO_MUL)
+
+DO_ZZX(sve2_umull_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZX(sve2_umull_idx_d, uint64_t, uint32_t, , H1_4, DO_MUL)
+
+#undef DO_ZZX
+
+#define DO_BITPERM(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ TYPE mm = *(TYPE *)(vm + i); \
+ *(TYPE *)(vd + i) = OP(nn, mm, sizeof(TYPE) * 8); \
+ } \
+}
+
+static uint64_t bitextract(uint64_t data, uint64_t mask, int n)
+{
+ uint64_t res = 0;
+ int db, rb = 0;
+
+ for (db = 0; db < n; ++db) {
+ if ((mask >> db) & 1) {
+ res |= ((data >> db) & 1) << rb;
+ ++rb;
+ }
+ }
+ return res;
+}
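+
+/*
+ * bitextract() above is BEXT, analogous to x86 PEXT: the mask-selected
+ * bits of data are packed into the low bits of the result, e.g.
+ * bitextract(0b0111, 0b0101, 4) == 0b11 (bits 0 and 2 of data).
+ */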
+
+DO_BITPERM(sve2_bext_b, uint8_t, bitextract)
+DO_BITPERM(sve2_bext_h, uint16_t, bitextract)
+DO_BITPERM(sve2_bext_s, uint32_t, bitextract)
+DO_BITPERM(sve2_bext_d, uint64_t, bitextract)
+
+static uint64_t bitdeposit(uint64_t data, uint64_t mask, int n)
+{
+ uint64_t res = 0;
+ int rb, db = 0;
+
+ for (rb = 0; rb < n; ++rb) {
+ if ((mask >> rb) & 1) {
+ res |= ((data >> db) & 1) << rb;
+ ++db;
+ }
+ }
+ return res;
+}
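+
+/*
+ * bitdeposit() above is BDEP, analogous to x86 PDEP and the inverse
+ * of bitextract(): the low bits of data are scattered to the
+ * mask-selected positions, e.g. bitdeposit(0b11, 0b0101, 4) == 0b0101.
+ */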
+
+DO_BITPERM(sve2_bdep_b, uint8_t, bitdeposit)
+DO_BITPERM(sve2_bdep_h, uint16_t, bitdeposit)
+DO_BITPERM(sve2_bdep_s, uint32_t, bitdeposit)
+DO_BITPERM(sve2_bdep_d, uint64_t, bitdeposit)
+
+static uint64_t bitgroup(uint64_t data, uint64_t mask, int n)
+{
+ uint64_t resm = 0, resu = 0;
+ int db, rbm = 0, rbu = 0;
+
+ for (db = 0; db < n; ++db) {
+ uint64_t val = (data >> db) & 1;
+ if ((mask >> db) & 1) {
+ resm |= val << rbm++;
+ } else {
+ resu |= val << rbu++;
+ }
+ }
+
+ return resm | (resu << rbm);
+}
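+
+/*
+ * bitgroup() above packs the mask-selected bits of data at the bottom
+ * and the remaining bits above them, both in their original order,
+ * e.g. bitgroup(0b1100, 0b1010, 4) == 0b1010 (selected bits 0b10
+ * below unselected bits 0b10).
+ */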
+
+DO_BITPERM(sve2_bgrp_b, uint8_t, bitgroup)
+DO_BITPERM(sve2_bgrp_h, uint16_t, bitgroup)
+DO_BITPERM(sve2_bgrp_s, uint32_t, bitgroup)
+DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup)
+
+#undef DO_BITPERM
+
+#define DO_CADD(NAME, TYPE, H, ADD_OP, SUB_OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sub_r = simd_data(desc); \
+ if (sub_r) { \
+ for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+ TYPE acc_r = *(TYPE *)(vn + H(i)); \
+ TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE el2_r = *(TYPE *)(vm + H(i)); \
+ TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ acc_r = ADD_OP(acc_r, el2_i); \
+ acc_i = SUB_OP(acc_i, el2_r); \
+ *(TYPE *)(vd + H(i)) = acc_r; \
+ *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
+ } \
+ } else { \
+ for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+ TYPE acc_r = *(TYPE *)(vn + H(i)); \
+ TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE el2_r = *(TYPE *)(vm + H(i)); \
+ TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ acc_r = SUB_OP(acc_r, el2_i); \
+ acc_i = ADD_OP(acc_i, el2_r); \
+ *(TYPE *)(vd + H(i)) = acc_r; \
+ *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
+ } \
+ } \
+}
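+
+/*
+ * CADD/SQCADD add M rotated by 90 or 270 degrees.  sub_r clear is the
+ * #90 form (r -= m_i, i += m_r); sub_r set is the #270 form
+ * (r += m_i, i -= m_r).
+ */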
+
+DO_CADD(sve2_cadd_b, int8_t, H1, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_h, int16_t, H1_2, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_s, int32_t, H1_4, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_d, int64_t, , DO_ADD, DO_SUB)
+
+DO_CADD(sve2_sqcadd_b, int8_t, H1, DO_SQADD_B, DO_SQSUB_B)
+DO_CADD(sve2_sqcadd_h, int16_t, H1_2, DO_SQADD_H, DO_SQSUB_H)
+DO_CADD(sve2_sqcadd_s, int32_t, H1_4, DO_SQADD_S, DO_SQSUB_S)
+DO_CADD(sve2_sqcadd_d, int64_t, , do_sqadd_d, do_sqsub_d)
+
+#undef DO_CADD
+
+#define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel = (simd_data(desc) & 1) * sizeof(TYPEN); \
+ int shift = simd_data(desc) >> 1; \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel)); \
+ *(TYPEW *)(vd + HW(i)) = nn << shift; \
+ } \
+}
+
+DO_ZZI_SHLL(sve2_sshll_h, int16_t, int8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_sshll_s, int32_t, int16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_sshll_d, int64_t, int32_t, , H1_4)
+
+DO_ZZI_SHLL(sve2_ushll_h, uint16_t, uint8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_ushll_s, uint32_t, uint16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_ushll_d, uint64_t, uint32_t, , H1_4)
+
+#undef DO_ZZI_SHLL
+
/* Two-operand reduction expander, controlled by a predicate.
* The difference between TYPERED and TYPERET has to do with
* sign-extension. E.g. for SMAX, TYPERED must be signed,
@@ -1052,6 +2181,27 @@ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
when N is negative, add 2**M-1. */
#define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M)
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else if (sh == 64) {
+ return x >> 63;
+ } else {
+ return 0;
+ }
+}
+
+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else {
+ /* Rounding the sign bit always produces 0. */
+ return 0;
+ }
+}
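+
+/*
+ * The rounding right shifts add back the last bit shifted out, i.e.
+ * they round to nearest with ties rounded up: do_urshr(7, 2) == 2,
+ * do_srshr(-6, 2) == -1.
+ */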
+
DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
@@ -1072,12 +2222,225 @@ DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD)
DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD)
DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)
-#undef DO_SHR
-#undef DO_SHL
+/* SVE2 bitwise shift by immediate */
+DO_ZPZI(sve2_sqshl_zpzi_b, int8_t, H1, do_sqshl_b)
+DO_ZPZI(sve2_sqshl_zpzi_h, int16_t, H1_2, do_sqshl_h)
+DO_ZPZI(sve2_sqshl_zpzi_s, int32_t, H1_4, do_sqshl_s)
+DO_ZPZI_D(sve2_sqshl_zpzi_d, int64_t, do_sqshl_d)
+
+DO_ZPZI(sve2_uqshl_zpzi_b, uint8_t, H1, do_uqshl_b)
+DO_ZPZI(sve2_uqshl_zpzi_h, uint16_t, H1_2, do_uqshl_h)
+DO_ZPZI(sve2_uqshl_zpzi_s, uint32_t, H1_4, do_uqshl_s)
+DO_ZPZI_D(sve2_uqshl_zpzi_d, uint64_t, do_uqshl_d)
+
+DO_ZPZI(sve2_srshr_b, int8_t, H1, do_srshr)
+DO_ZPZI(sve2_srshr_h, int16_t, H1_2, do_srshr)
+DO_ZPZI(sve2_srshr_s, int32_t, H1_4, do_srshr)
+DO_ZPZI_D(sve2_srshr_d, int64_t, do_srshr)
+
+DO_ZPZI(sve2_urshr_b, uint8_t, H1, do_urshr)
+DO_ZPZI(sve2_urshr_h, uint16_t, H1_2, do_urshr)
+DO_ZPZI(sve2_urshr_s, uint32_t, H1_4, do_urshr)
+DO_ZPZI_D(sve2_urshr_d, uint64_t, do_urshr)
+
+#define do_suqrshl_b(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
+#define do_suqrshl_h(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
+#define do_suqrshl_s(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, m, 32, false, &discard); })
+#define do_suqrshl_d(n, m) \
+ ({ uint32_t discard; do_suqrshl_d(n, m, false, &discard); })
+
+DO_ZPZI(sve2_sqshlu_b, int8_t, H1, do_suqrshl_b)
+DO_ZPZI(sve2_sqshlu_h, int16_t, H1_2, do_suqrshl_h)
+DO_ZPZI(sve2_sqshlu_s, int32_t, H1_4, do_suqrshl_s)
+DO_ZPZI_D(sve2_sqshlu_d, int64_t, do_suqrshl_d)
+
#undef DO_ASRD
#undef DO_ZPZI
#undef DO_ZPZI_D
+#define DO_SHRNB(NAME, TYPEW, TYPEN, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + i); \
+ *(TYPEW *)(vd + i) = (TYPEN)OP(nn, shift); \
+ } \
+}
+
+#define DO_SHRNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, shift); \
+ } \
+}
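+
+/*
+ * Shift-and-narrow: the B forms write the narrowed result to the
+ * bottom half of each wide lane and zero the top half; the T forms
+ * write only the top half and leave the bottom half of D unchanged.
+ */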
+
+DO_SHRNB(sve2_shrnb_h, uint16_t, uint8_t, DO_SHR)
+DO_SHRNB(sve2_shrnb_s, uint32_t, uint16_t, DO_SHR)
+DO_SHRNB(sve2_shrnb_d, uint64_t, uint32_t, DO_SHR)
+
+DO_SHRNT(sve2_shrnt_h, uint16_t, uint8_t, H1_2, H1, DO_SHR)
+DO_SHRNT(sve2_shrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_SHR)
+DO_SHRNT(sve2_shrnt_d, uint64_t, uint32_t, , H1_4, DO_SHR)
+
+DO_SHRNB(sve2_rshrnb_h, uint16_t, uint8_t, do_urshr)
+DO_SHRNB(sve2_rshrnb_s, uint32_t, uint16_t, do_urshr)
+DO_SHRNB(sve2_rshrnb_d, uint64_t, uint32_t, do_urshr)
+
+DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr)
+DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr)
+DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, , H1_4, do_urshr)
+
+#define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX)
+#define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX)
+#define DO_SQSHRUN_D(x, sh) \
+ do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX)
+
+DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H)
+DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S)
+DO_SHRNB(sve2_sqshrunb_d, int64_t, uint32_t, DO_SQSHRUN_D)
+
+DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H)
+DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S)
+DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, , H1_4, DO_SQSHRUN_D)
+
+#define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX)
+#define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX)
+#define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX)
+
+DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H)
+DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S)
+DO_SHRNB(sve2_sqrshrunb_d, int64_t, uint32_t, DO_SQRSHRUN_D)
+
+DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
+DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
+DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)
+
+#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX)
+#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX)
+#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX)
+
+DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H)
+DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S)
+DO_SHRNB(sve2_sqshrnb_d, int64_t, uint32_t, DO_SQSHRN_D)
+
+DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H)
+DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S)
+DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, , H1_4, DO_SQSHRN_D)
+
+#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX)
+#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX)
+#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX)
+
+DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H)
+DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S)
+DO_SHRNB(sve2_sqrshrnb_d, int64_t, uint32_t, DO_SQRSHRN_D)
+
+DO_SHRNT(sve2_sqrshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRN_H)
+DO_SHRNT(sve2_sqrshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRN_S)
+DO_SHRNT(sve2_sqrshrnt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRN_D)
+
+#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
+#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
+#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
+
+DO_SHRNB(sve2_uqshrnb_h, uint16_t, uint8_t, DO_UQSHRN_H)
+DO_SHRNB(sve2_uqshrnb_s, uint32_t, uint16_t, DO_UQSHRN_S)
+DO_SHRNB(sve2_uqshrnb_d, uint64_t, uint32_t, DO_UQSHRN_D)
+
+DO_SHRNT(sve2_uqshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQSHRN_H)
+DO_SHRNT(sve2_uqshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQSHRN_S)
+DO_SHRNT(sve2_uqshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQSHRN_D)
+
+#define DO_UQRSHRN_H(x, sh) MIN(do_urshr(x, sh), UINT8_MAX)
+#define DO_UQRSHRN_S(x, sh) MIN(do_urshr(x, sh), UINT16_MAX)
+#define DO_UQRSHRN_D(x, sh) MIN(do_urshr(x, sh), UINT32_MAX)
+
+DO_SHRNB(sve2_uqrshrnb_h, uint16_t, uint8_t, DO_UQRSHRN_H)
+DO_SHRNB(sve2_uqrshrnb_s, uint32_t, uint16_t, DO_UQRSHRN_S)
+DO_SHRNB(sve2_uqrshrnb_d, uint64_t, uint32_t, DO_UQRSHRN_D)
+
+DO_SHRNT(sve2_uqrshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQRSHRN_H)
+DO_SHRNT(sve2_uqrshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQRSHRN_S)
+DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQRSHRN_D)
+
+#undef DO_SHRNB
+#undef DO_SHRNT
+
+#define DO_BINOPNB(NAME, TYPEW, TYPEN, SHIFT, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + i); \
+ TYPEW mm = *(TYPEW *)(vm + i); \
+ *(TYPEW *)(vd + i) = (TYPEN)OP(nn, mm, SHIFT); \
+ } \
+}
+
+#define DO_BINOPNT(NAME, TYPEW, TYPEN, SHIFT, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ TYPEW mm = *(TYPEW *)(vm + HW(i)); \
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, mm, SHIFT); \
+ } \
+}
+
+#define DO_ADDHN(N, M, SH) ((N + M) >> SH)
+#define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH)
+#define DO_SUBHN(N, M, SH) ((N - M) >> SH)
+#define DO_RSUBHN(N, M, SH) ((N - M + ((__typeof(N))1 << (SH - 1))) >> SH)
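+
+/*
+ * ADDHN/SUBHN keep the high half of the sum or difference; the
+ * rounding (R) forms add a bias of 1 << (SH - 1) first, e.g.
+ * DO_RADDHN(0x40, 0x40, 8) == 1 where DO_ADDHN(0x40, 0x40, 8) == 0.
+ */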
+
+DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN)
+DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN)
+DO_BINOPNB(sve2_addhnb_d, uint64_t, uint32_t, 32, DO_ADDHN)
+
+DO_BINOPNT(sve2_addhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_ADDHN)
+DO_BINOPNT(sve2_addhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_ADDHN)
+DO_BINOPNT(sve2_addhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_ADDHN)
+
+DO_BINOPNB(sve2_raddhnb_h, uint16_t, uint8_t, 8, DO_RADDHN)
+DO_BINOPNB(sve2_raddhnb_s, uint32_t, uint16_t, 16, DO_RADDHN)
+DO_BINOPNB(sve2_raddhnb_d, uint64_t, uint32_t, 32, DO_RADDHN)
+
+DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN)
+DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN)
+DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RADDHN)
+
+DO_BINOPNB(sve2_subhnb_h, uint16_t, uint8_t, 8, DO_SUBHN)
+DO_BINOPNB(sve2_subhnb_s, uint32_t, uint16_t, 16, DO_SUBHN)
+DO_BINOPNB(sve2_subhnb_d, uint64_t, uint32_t, 32, DO_SUBHN)
+
+DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN)
+DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN)
+DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_SUBHN)
+
+DO_BINOPNB(sve2_rsubhnb_h, uint16_t, uint8_t, 8, DO_RSUBHN)
+DO_BINOPNB(sve2_rsubhnb_s, uint32_t, uint16_t, 16, DO_RSUBHN)
+DO_BINOPNB(sve2_rsubhnb_d, uint64_t, uint32_t, 32, DO_RSUBHN)
+
+DO_BINOPNT(sve2_rsubhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RSUBHN)
+DO_BINOPNT(sve2_rsubhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RSUBHN)
+DO_BINOPNT(sve2_rsubhnt_d, uint64_t, uint32_t, 32, , H1_4, DO_RSUBHN)
+
+#undef DO_RSUBHN
+#undef DO_SUBHN
+#undef DO_RADDHN
+#undef DO_ADDHN
+
+#undef DO_BINOPNB
+
/* Fully general four-operand expander, controlled by a predicate.
*/
#define DO_ZPZZZ(NAME, TYPE, H, OP) \
@@ -1356,13 +2719,7 @@ void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(int8_t)) {
- int r = *(int8_t *)(a + i) + b;
- if (r > INT8_MAX) {
- r = INT8_MAX;
- } else if (r < INT8_MIN) {
- r = INT8_MIN;
- }
- *(int8_t *)(d + i) = r;
+ *(int8_t *)(d + i) = DO_SQADD_B(b, *(int8_t *)(a + i));
}
}
@@ -1371,13 +2728,7 @@ void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(int16_t)) {
- int r = *(int16_t *)(a + i) + b;
- if (r > INT16_MAX) {
- r = INT16_MAX;
- } else if (r < INT16_MIN) {
- r = INT16_MIN;
- }
- *(int16_t *)(d + i) = r;
+ *(int16_t *)(d + i) = DO_SQADD_H(b, *(int16_t *)(a + i));
}
}
@@ -1386,13 +2737,7 @@ void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(int32_t)) {
- int64_t r = *(int32_t *)(a + i) + b;
- if (r > INT32_MAX) {
- r = INT32_MAX;
- } else if (r < INT32_MIN) {
- r = INT32_MIN;
- }
- *(int32_t *)(d + i) = r;
+ *(int32_t *)(d + i) = DO_SQADD_S(b, *(int32_t *)(a + i));
}
}
@@ -1401,13 +2746,7 @@ void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(int64_t)) {
- int64_t ai = *(int64_t *)(a + i);
- int64_t r = ai + b;
- if (((r ^ ai) & ~(ai ^ b)) < 0) {
- /* Signed overflow. */
- r = (r < 0 ? INT64_MAX : INT64_MIN);
- }
- *(int64_t *)(d + i) = r;
+ *(int64_t *)(d + i) = do_sqadd_d(b, *(int64_t *)(a + i));
}
}
@@ -1420,13 +2759,7 @@ void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
- int r = *(uint8_t *)(a + i) + b;
- if (r > UINT8_MAX) {
- r = UINT8_MAX;
- } else if (r < 0) {
- r = 0;
- }
- *(uint8_t *)(d + i) = r;
+ *(uint8_t *)(d + i) = DO_UQADD_B(b, *(uint8_t *)(a + i));
}
}
@@ -1435,13 +2768,7 @@ void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
- int r = *(uint16_t *)(a + i) + b;
- if (r > UINT16_MAX) {
- r = UINT16_MAX;
- } else if (r < 0) {
- r = 0;
- }
- *(uint16_t *)(d + i) = r;
+ *(uint16_t *)(d + i) = DO_UQADD_H(b, *(uint16_t *)(a + i));
}
}
@@ -1450,13 +2777,7 @@ void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
- int64_t r = *(uint32_t *)(a + i) + b;
- if (r > UINT32_MAX) {
- r = UINT32_MAX;
- } else if (r < 0) {
- r = 0;
- }
- *(uint32_t *)(d + i) = r;
+ *(uint32_t *)(d + i) = DO_UQADD_S(b, *(uint32_t *)(a + i));
}
}
@@ -1465,11 +2786,7 @@ void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
- uint64_t r = *(uint64_t *)(a + i) + b;
- if (r < b) {
- r = UINT64_MAX;
- }
- *(uint64_t *)(d + i) = r;
+ *(uint64_t *)(d + i) = do_uqadd_d(b, *(uint64_t *)(a + i));
}
}
@@ -1478,8 +2795,7 @@ void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc)
intptr_t i, oprsz = simd_oprsz(desc);
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
- uint64_t ai = *(uint64_t *)(a + i);
- *(uint64_t *)(d + i) = (ai < b ? 0 : ai - b);
+ *(uint64_t *)(d + i) = do_uqsub_d(*(uint64_t *)(a + i), b);
}
}
@@ -1772,28 +3088,80 @@ void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
}
}
-#define DO_TBL(NAME, TYPE, H) \
-void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
-{ \
- intptr_t i, opr_sz = simd_oprsz(desc); \
- uintptr_t elem = opr_sz / sizeof(TYPE); \
- TYPE *d = vd, *n = vn, *m = vm; \
- ARMVectorReg tmp; \
- if (unlikely(vd == vn)) { \
- n = memcpy(&tmp, vn, opr_sz); \
- } \
- for (i = 0; i < elem; i++) { \
- TYPE j = m[H(i)]; \
- d[H(i)] = j < elem ? n[H(j)] : 0; \
- } \
+typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool);
+
+static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc,
+ bool is_tbx, tb_impl_fn *fn)
+{
+ ARMVectorReg scratch;
+ uintptr_t oprsz = simd_oprsz(desc);
+
+ if (unlikely(vd == vn)) {
+ vn = memcpy(&scratch, vn, oprsz);
+ }
+
+ fn(vd, vn, NULL, vm, oprsz, is_tbx);
+}
+
+static inline void do_tbl2(void *vd, void *vn0, void *vn1, void *vm,
+ uint32_t desc, bool is_tbx, tb_impl_fn *fn)
+{
+ ARMVectorReg scratch;
+ uintptr_t oprsz = simd_oprsz(desc);
+
+ if (unlikely(vd == vn0)) {
+ vn0 = memcpy(&scratch, vn0, oprsz);
+ if (vd == vn1) {
+ vn1 = vn0;
+ }
+ } else if (unlikely(vd == vn1)) {
+ vn1 = memcpy(&scratch, vn1, oprsz);
+ }
+
+ fn(vd, vn0, vn1, vm, oprsz, is_tbx);
+}
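+
+/*
+ * The DO_TB expansions below implement both TBL and TBX: an
+ * out-of-range index yields 0 for TBL but leaves the destination
+ * element unchanged for TBX, and the two-register SVE2 TBL treats
+ * {tbl0, tbl1} as a single table of 2 * nelem entries.
+ */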
+
+#define DO_TB(SUFF, TYPE, H) \
+static inline void do_tb_##SUFF(void *vd, void *vt0, void *vt1, \
+ void *vm, uintptr_t oprsz, bool is_tbx) \
+{ \
+ TYPE *d = vd, *tbl0 = vt0, *tbl1 = vt1, *indexes = vm; \
+ uintptr_t i, nelem = oprsz / sizeof(TYPE); \
+ for (i = 0; i < nelem; ++i) { \
+ TYPE index = indexes[H(i)], val = 0; \
+ if (index < nelem) { \
+ val = tbl0[H(index)]; \
+ } else { \
+ index -= nelem; \
+ if (tbl1 && index < nelem) { \
+ val = tbl1[H(index)]; \
+ } else if (is_tbx) { \
+ continue; \
+ } \
+ } \
+ d[H(i)] = val; \
+ } \
+} \
+void HELPER(sve_tbl_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ do_tbl1(vd, vn, vm, desc, false, do_tb_##SUFF); \
+} \
+void HELPER(sve2_tbl_##SUFF)(void *vd, void *vn0, void *vn1, \
+ void *vm, uint32_t desc) \
+{ \
+ do_tbl2(vd, vn0, vn1, vm, desc, false, do_tb_##SUFF); \
+} \
+void HELPER(sve2_tbx_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ do_tbl1(vd, vn, vm, desc, true, do_tb_##SUFF); \
}
-DO_TBL(sve_tbl_b, uint8_t, H1)
-DO_TBL(sve_tbl_h, uint16_t, H2)
-DO_TBL(sve_tbl_s, uint32_t, H4)
-DO_TBL(sve_tbl_d, uint64_t, )
+DO_TB(b, uint8_t, H1)
+DO_TB(h, uint16_t, H2)
+DO_TB(s, uint32_t, H4)
+DO_TB(d, uint64_t, )
-#undef TBL
+#undef DO_TB
#define DO_UNPK(NAME, TYPED, TYPES, HD, HS) \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
@@ -2143,36 +3511,45 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
*(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i)); \
*(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)(vm + H(i)); \
} \
+ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \
+ memset(vd + oprsz - 16, 0, 16); \
+ } \
}
DO_ZIP(sve_zip_b, uint8_t, H1)
DO_ZIP(sve_zip_h, uint16_t, H1_2)
DO_ZIP(sve_zip_s, uint32_t, H1_4)
DO_ZIP(sve_zip_d, uint64_t, )
+DO_ZIP(sve2_zip_q, Int128, )
#define DO_UZP(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
{ \
intptr_t oprsz = simd_oprsz(desc); \
- intptr_t oprsz_2 = oprsz / 2; \
intptr_t odd_ofs = simd_data(desc); \
- intptr_t i; \
+ intptr_t i, p; \
ARMVectorReg tmp_m; \
if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \
vm = memcpy(&tmp_m, vm, oprsz); \
} \
- for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
- *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(2 * i + odd_ofs)); \
- } \
- for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
- *(TYPE *)(vd + H(oprsz_2 + i)) = *(TYPE *)(vm + H(2 * i + odd_ofs)); \
- } \
+ i = 0, p = odd_ofs; \
+ do { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(p)); \
+ i += sizeof(TYPE), p += 2 * sizeof(TYPE); \
+ } while (p < oprsz); \
+ p -= oprsz; \
+ do { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(vm + H(p)); \
+ i += sizeof(TYPE), p += 2 * sizeof(TYPE); \
+ } while (p < oprsz); \
+ tcg_debug_assert(i == oprsz); \
}
DO_UZP(sve_uzp_b, uint8_t, H1)
DO_UZP(sve_uzp_h, uint16_t, H1_2)
DO_UZP(sve_uzp_s, uint32_t, H1_4)
DO_UZP(sve_uzp_d, uint64_t, )
+DO_UZP(sve2_uzp_q, Int128, )
#define DO_TRN(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
@@ -2186,12 +3563,16 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
*(TYPE *)(vd + H(i + 0)) = ae; \
*(TYPE *)(vd + H(i + sizeof(TYPE))) = be; \
} \
+ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \
+ memset(vd + oprsz - 16, 0, 16); \
+ } \
}
DO_TRN(sve_trn_b, uint8_t, H1)
DO_TRN(sve_trn_h, uint16_t, H1_2)
DO_TRN(sve_trn_s, uint32_t, H1_4)
DO_TRN(sve_trn_d, uint64_t, )
+DO_TRN(sve2_trn_q, Int128, )
#undef DO_ZIP
#undef DO_UZP
@@ -2848,7 +4229,7 @@ uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc)
return sum;
}
-uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc)
+uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc)
{
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
@@ -2874,6 +4255,42 @@ uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc)
return predtest_ones(d, oprsz, esz_mask);
}
+uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint64_t esz_mask = pred_esz_masks[esz];
+ ARMPredicateReg *d = vd;
+ intptr_t i, invcount, oprbits;
+ uint64_t bits;
+
+ if (count == 0) {
+ return do_zero(d, oprsz);
+ }
+
+ oprbits = oprsz * 8;
+ tcg_debug_assert(count <= oprbits);
+
+ bits = esz_mask;
+ if (oprbits & 63) {
+ bits &= MAKE_64BIT_MASK(0, oprbits & 63);
+ }
+
+ invcount = oprbits - count;
+ for (i = (oprsz - 1) / 8; i > invcount / 64; --i) {
+ d->p[i] = bits;
+ bits = esz_mask;
+ }
+
+ d->p[i] = bits & MAKE_64BIT_MASK(invcount & 63, 64);
+
+ while (--i >= 0) {
+ d->p[i] = 0;
+ }
+
+ return predtest_ones(d, oprsz, esz_mask);
+}
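+
+/*
+ * sve_whileg above is the counterpart of sve_whilel for the
+ * decrementing WHILE forms (WHILEGE/WHILEGT etc.): it sets the top
+ * "count" predicate bits rather than the bottom, subject to the
+ * usual element-size mask.
+ */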
+
/* Recursive reduction on a function;
* C.f. the ARM ARM function ReducePredicated.
*
@@ -3344,6 +4761,94 @@ DO_ZPZ_FP(sve_ucvt_dh, uint64_t, , uint64_to_float16)
DO_ZPZ_FP(sve_ucvt_ds, uint64_t, , uint64_to_float32)
DO_ZPZ_FP(sve_ucvt_dd, uint64_t, , uint64_to_float64)
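+
+/*
+ * FLOGB returns the signed integer base-2 exponent of the input as
+ * computed below: exp - bias for normals, -bias - clz(frac) for
+ * denormals, INT_MAX for infinities, and INT_MIN (with Invalid
+ * raised) for zero or NaN; a denormal flushed to zero counts as zero.
+ */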
+static int16_t do_float16_logb_as_int(float16 a, float_status *s)
+{
+ /* Extract frac to the top of the uint32_t. */
+ uint32_t frac = (uint32_t)a << (16 + 6);
+ int16_t exp = extract32(a, 10, 5);
+
+ if (unlikely(exp == 0)) {
+ if (frac != 0) {
+ if (!get_flush_inputs_to_zero(s)) {
+ /* denormal: bias - fractional_zeros */
+ return -15 - clz32(frac);
+ }
+ /* flush to zero */
+ float_raise(float_flag_input_denormal, s);
+ }
+ } else if (unlikely(exp == 0x1f)) {
+ if (frac == 0) {
+ return INT16_MAX; /* infinity */
+ }
+ } else {
+ /* normal: exp - bias */
+ return exp - 15;
+ }
+ /* nan or zero */
+ float_raise(float_flag_invalid, s);
+ return INT16_MIN;
+}
+
+static int32_t do_float32_logb_as_int(float32 a, float_status *s)
+{
+ /* Extract frac to the top of the uint32_t. */
+ uint32_t frac = a << 9;
+ int32_t exp = extract32(a, 23, 8);
+
+ if (unlikely(exp == 0)) {
+ if (frac != 0) {
+ if (!get_flush_inputs_to_zero(s)) {
+ /* denormal: bias - fractional_zeros */
+ return -127 - clz32(frac);
+ }
+ /* flush to zero */
+ float_raise(float_flag_input_denormal, s);
+ }
+ } else if (unlikely(exp == 0xff)) {
+ if (frac == 0) {
+ return INT32_MAX; /* infinity */
+ }
+ } else {
+ /* normal: exp - bias */
+ return exp - 127;
+ }
+ /* nan or zero */
+ float_raise(float_flag_invalid, s);
+ return INT32_MIN;
+}
+
+static int64_t do_float64_logb_as_int(float64 a, float_status *s)
+{
+ /* Extract frac to the top of the uint64_t. */
+ uint64_t frac = a << 12;
+ int64_t exp = extract64(a, 52, 11);
+
+ if (unlikely(exp == 0)) {
+ if (frac != 0) {
+ if (!get_flush_inputs_to_zero(s)) {
+ /* denormal: bias - fractional_zeros */
+ return -1023 - clz64(frac);
+ }
+ /* flush to zero */
+ float_raise(float_flag_input_denormal, s);
+ }
+ } else if (unlikely(exp == 0x7ff)) {
+ if (frac == 0) {
+ return INT64_MAX; /* infinity */
+ }
+ } else {
+ /* normal: exp - bias */
+ return exp - 1023;
+ }
+ /* nan or zero */
+ float_raise(float_flag_invalid, s);
+ return INT64_MIN;
+}
+
+DO_ZPZ_FP(flogb_h, float16, H1_2, do_float16_logb_as_int)
+DO_ZPZ_FP(flogb_s, float32, H1_4, do_float32_logb_as_int)
+DO_ZPZ_FP(flogb_d, float64, , do_float64_logb_as_int)
+
#undef DO_ZPZ_FP
static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg,
@@ -5859,3 +7364,404 @@ DO_ST1_ZPZ_D(dd_be, zd, MO_64)
#undef DO_ST1_ZPZ_S
#undef DO_ST1_ZPZ_D
+
+void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = n[i] ^ m[i] ^ k[i];
+ }
+}
+
+void HELPER(sve2_bcax)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = n[i] ^ (m[i] & ~k[i]);
+ }
+}
+
+void HELPER(sve2_bsl1n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = (~n[i] & k[i]) | (m[i] & ~k[i]);
+ }
+}
+
+void HELPER(sve2_bsl2n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = (n[i] & k[i]) | (~m[i] & ~k[i]);
+ }
+}
+
+void HELPER(sve2_nbsl)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ~((n[i] & k[i]) | (m[i] & ~k[i]));
+ }
+}
+
+/*
+ * Return true if any uint8_t/uint16_t lane of m0 or m1 equals the
+ * low uint8_t/uint16_t of n.
+ * See hasless(v,1) from
+ * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ */
+static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
+{
+ int bits = 8 << esz;
+ uint64_t ones = dup_const(esz, 1);
+ uint64_t signs = ones << (bits - 1);
+ uint64_t cmp0, cmp1;
+
+ cmp1 = dup_const(esz, n);
+ cmp0 = cmp1 ^ m0;
+ cmp1 = cmp1 ^ m1;
+ cmp0 = (cmp0 - ones) & ~cmp0;
+ cmp1 = (cmp1 - ones) & ~cmp1;
+ return (cmp0 | cmp1) & signs;
+}
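+
+/*
+ * After the XOR above, a lane that equals n becomes zero; for a lane
+ * value x, the classic (x - 1) & ~x sets the sign bit iff x == 0, so
+ * the result is nonzero iff some lane of m0 or m1 matches n.
+ */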
+
+static inline uint32_t do_match(void *vd, void *vn, void *vm, void *vg,
+ uint32_t desc, int esz, bool nmatch)
+{
+ uint16_t esz_mask = pred_esz_masks[esz];
+ intptr_t opr_sz = simd_oprsz(desc);
+ uint32_t flags = PREDTEST_INIT;
+ intptr_t i, j, k;
+
+ for (i = 0; i < opr_sz; i += 16) {
+ uint64_t m0 = *(uint64_t *)(vm + i);
+ uint64_t m1 = *(uint64_t *)(vm + i + 8);
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)) & esz_mask;
+ uint16_t out = 0;
+
+ for (j = 0; j < 16; j += 8) {
+ uint64_t n = *(uint64_t *)(vn + i + j);
+
+ for (k = 0; k < 8; k += 1 << esz) {
+ if (pg & (1 << (j + k))) {
+ bool o = do_match2(n >> (k * 8), m0, m1, esz);
+ out |= (o ^ nmatch) << (j + k);
+ }
+ }
+ }
+ *(uint16_t *)(vd + H1_2(i >> 3)) = out;
+ flags = iter_predtest_fwd(out, pg, flags);
+ }
+ return flags;
+}
+
+#define DO_PPZZ_MATCH(NAME, ESZ, INV) \
+uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ return do_match(vd, vn, vm, vg, desc, ESZ, INV); \
+}
+
+DO_PPZZ_MATCH(sve2_match_ppzz_b, MO_8, false)
+DO_PPZZ_MATCH(sve2_match_ppzz_h, MO_16, false)
+
+DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true)
+DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true)
+
+#undef DO_PPZZ_MATCH
+
+void HELPER(sve2_histcnt_s)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t desc)
+{
+ ARMVectorReg scratch;
+ intptr_t i, j;
+ intptr_t opr_sz = simd_oprsz(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ if (d == n) {
+ n = memcpy(&scratch, n, opr_sz);
+ if (d == m) {
+ m = n;
+ }
+ } else if (d == m) {
+ m = memcpy(&scratch, m, opr_sz);
+ }
+
+ for (i = 0; i < opr_sz; i += 4) {
+ uint64_t count = 0;
+ uint8_t pred;
+
+ pred = pg[H1(i >> 3)] >> (i & 7);
+ if (pred & 1) {
+ uint32_t nn = n[H4(i >> 2)];
+
+ for (j = 0; j <= i; j += 4) {
+ pred = pg[H1(j >> 3)] >> (j & 7);
+ if ((pred & 1) && nn == m[H4(j >> 2)]) {
+ ++count;
+ }
+ }
+ }
+ d[H4(i >> 2)] = count;
+ }
+}
+
+void HELPER(sve2_histcnt_d)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t desc)
+{
+ ARMVectorReg scratch;
+ intptr_t i, j;
+ intptr_t opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ if (d == n) {
+ n = memcpy(&scratch, n, opr_sz);
+ if (d == m) {
+ m = n;
+ }
+ } else if (d == m) {
+ m = memcpy(&scratch, m, opr_sz);
+ }
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ uint64_t count = 0;
+ if (pg[H1(i)] & 1) {
+ uint64_t nn = n[i];
+ for (j = 0; j <= i; ++j) {
+ if ((pg[H1(j)] & 1) && nn == m[j]) {
+ ++count;
+ }
+ }
+ }
+ d[i] = count;
+ }
+}
+
+/*
+ * Returns the number of bytes in m0 and m1 that match n.
+ * Unlike do_match2, we need an exact count, not just true/false.
+ * This requires two extra logical operations.
+ */
+static inline uint64_t do_histseg_cnt(uint8_t n, uint64_t m0, uint64_t m1)
+{
+ const uint64_t mask = dup_const(MO_8, 0x7f);
+ uint64_t cmp0, cmp1;
+
+ cmp1 = dup_const(MO_8, n);
+ cmp0 = cmp1 ^ m0;
+ cmp1 = cmp1 ^ m1;
+
+ /*
+ * 1: clear msb of each byte to avoid carry to next byte (& mask)
+ * 2: carry in to msb if byte != 0 (+ mask)
+ * 3: set msb if cmp has msb set (| cmp)
+ * 4: set ~msb to ignore them (| mask)
+ * We now have 0xff for byte != 0 or 0x7f for byte == 0.
+ * 5: invert, resulting in 0x80 if and only if byte == 0.
+ */
+ cmp0 = ~(((cmp0 & mask) + mask) | cmp0 | mask);
+ cmp1 = ~(((cmp1 & mask) + mask) | cmp1 | mask);
+
+ /*
+ * Combine the two compares in a way that the bits do
+ * not overlap, and so preserves the count of set bits.
+ * If the host has an efficient instruction for ctpop,
+ * then ctpop(x) + ctpop(y) has the same number of
+ * operations as ctpop(x | (y >> 1)). If the host does
+ * not have an efficient ctpop, then we only want to
+ * use it once.
+ */
+ return ctpop64(cmp0 | (cmp1 >> 1));
+}
+
+void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j;
+ intptr_t opr_sz = simd_oprsz(desc);
+
+ for (i = 0; i < opr_sz; i += 16) {
+ uint64_t n0 = *(uint64_t *)(vn + i);
+ uint64_t m0 = *(uint64_t *)(vm + i);
+ uint64_t n1 = *(uint64_t *)(vn + i + 8);
+ uint64_t m1 = *(uint64_t *)(vm + i + 8);
+ uint64_t out0 = 0;
+ uint64_t out1 = 0;
+
+ for (j = 0; j < 64; j += 8) {
+ uint64_t cnt0 = do_histseg_cnt(n0 >> j, m0, m1);
+ uint64_t cnt1 = do_histseg_cnt(n1 >> j, m0, m1);
+ out0 |= cnt0 << j;
+ out1 |= cnt1 << j;
+ }
+
+ *(uint64_t *)(vd + i) = out0;
+ *(uint64_t *)(vd + i + 8) = out1;
+ }
+}
+
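+/*
+ * XAR rotates N ^ M right by shr bits within each element.  The _b
+ * and _h forms operate on whole 64-bit words: the mask separates the
+ * bits that stay within a lane from those that would otherwise cross
+ * into the neighbouring lane.
+ */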
+void HELPER(sve2_xar_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ int shr = simd_data(desc);
+ int shl = 8 - shr;
+ uint64_t mask = dup_const(MO_8, 0xff >> shr);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ uint64_t t = n[i] ^ m[i];
+ d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+ }
+}
+
+void HELPER(sve2_xar_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ int shr = simd_data(desc);
+ int shl = 16 - shr;
+ uint64_t mask = dup_const(MO_16, 0xffff >> shr);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ uint64_t t = n[i] ^ m[i];
+ d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+ }
+}
+
+void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ int shr = simd_data(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ror32(n[i] ^ m[i], shr);
+ }
+}
+
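+/*
+ * FMMLA: each 128-bit segment of N, M and A holds a 2x2 row-major
+ * matrix, and the result computed below is D = A + N * M^T, i.e.
+ * each product pairs row i of N with row j of M.
+ */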
+void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va,
+ void *status, uint32_t desc)
+{
+ intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float32) * 4);
+
+ for (s = 0; s < opr_sz; ++s) {
+ float32 *n = vn + s * sizeof(float32) * 4;
+ float32 *m = vm + s * sizeof(float32) * 4;
+ float32 *a = va + s * sizeof(float32) * 4;
+ float32 *d = vd + s * sizeof(float32) * 4;
+ float32 n00 = n[H4(0)], n01 = n[H4(1)];
+ float32 n10 = n[H4(2)], n11 = n[H4(3)];
+ float32 m00 = m[H4(0)], m01 = m[H4(1)];
+ float32 m10 = m[H4(2)], m11 = m[H4(3)];
+ float32 p0, p1;
+
+ /* i = 0, j = 0 */
+ p0 = float32_mul(n00, m00, status);
+ p1 = float32_mul(n01, m01, status);
+ d[H4(0)] = float32_add(a[H4(0)], float32_add(p0, p1, status), status);
+
+ /* i = 0, j = 1 */
+ p0 = float32_mul(n00, m10, status);
+ p1 = float32_mul(n01, m11, status);
+ d[H4(1)] = float32_add(a[H4(1)], float32_add(p0, p1, status), status);
+
+ /* i = 1, j = 0 */
+ p0 = float32_mul(n10, m00, status);
+ p1 = float32_mul(n11, m01, status);
+ d[H4(2)] = float32_add(a[H4(2)], float32_add(p0, p1, status), status);
+
+ /* i = 1, j = 1 */
+ p0 = float32_mul(n10, m10, status);
+ p1 = float32_mul(n11, m11, status);
+ d[H4(3)] = float32_add(a[H4(3)], float32_add(p0, p1, status), status);
+ }
+}
+
+void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va,
+ void *status, uint32_t desc)
+{
+ intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float64) * 4);
+
+ for (s = 0; s < opr_sz; ++s) {
+ float64 *n = vn + s * sizeof(float64) * 4;
+ float64 *m = vm + s * sizeof(float64) * 4;
+ float64 *a = va + s * sizeof(float64) * 4;
+ float64 *d = vd + s * sizeof(float64) * 4;
+ float64 n00 = n[0], n01 = n[1], n10 = n[2], n11 = n[3];
+ float64 m00 = m[0], m01 = m[1], m10 = m[2], m11 = m[3];
+ float64 p0, p1;
+
+ /* i = 0, j = 0 */
+ p0 = float64_mul(n00, m00, status);
+ p1 = float64_mul(n01, m01, status);
+ d[0] = float64_add(a[0], float64_add(p0, p1, status), status);
+
+ /* i = 0, j = 1 */
+ p0 = float64_mul(n00, m10, status);
+ p1 = float64_mul(n01, m11, status);
+ d[1] = float64_add(a[1], float64_add(p0, p1, status), status);
+
+ /* i = 1, j = 0 */
+ p0 = float64_mul(n10, m00, status);
+ p1 = float64_mul(n11, m01, status);
+ d[2] = float64_add(a[2], float64_add(p0, p1, status), status);
+
+ /* i = 1, j = 1 */
+ p0 = float64_mul(n10, m10, status);
+ p1 = float64_mul(n11, m11, status);
+ d[3] = float64_add(a[3], float64_add(p0, p1, status), status);
+ }
+}
+
+#define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPEW); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
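+
+/*
+ * FCVTNT (expanded below) walks the vector downwards in chunks
+ * governed by one 64-bit predicate word at a time; each active wide
+ * element is narrowed into the top half of its lane, leaving the
+ * bottom half of D unchanged.
+ */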
+
+DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
+DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, , H1_4, float64_to_float32)
+
+#define DO_FCVTLT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPEW); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPEN nn = *(TYPEN *)(vn + HN(i + sizeof(TYPEN))); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
+
+DO_FCVTLT(sve2_fcvtlt_hs, uint32_t, uint16_t, H1_4, H1_2, sve_f16_to_f32)
+DO_FCVTLT(sve2_fcvtlt_sd, uint64_t, uint32_t, , H1_4, float32_to_float64)
+
+#undef DO_FCVTLT
+#undef DO_FCVTNT
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 0c80d0b505..ceac0ee2bd 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -683,6 +683,34 @@ static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
tcg_temp_free_ptr(qc_ptr);
}
+/* Expand a 4-operand operation using an out-of-line helper. */
+static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, int ra, int data, gen_helper_gvec_4 *fn)
+{
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+}
+
+/*
+ * Expand a 4-operand + fpstatus pointer + simd data value operation using
+ * an out-of-line helper.
+ */
+static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, int ra, bool is_fp16, int data,
+ gen_helper_gvec_4_ptr *fn)
+{
+ TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+ tcg_temp_free_ptr(fpst);
+}
+
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
* than the 32 bit equivalent.
*/
@@ -12147,6 +12175,22 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
}
feature = dc_isar_feature(aa64_dp, s);
break;
+ case 0x03: /* USDOT */
+ if (size != MO_32) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_i8mm, s);
+ break;
+ case 0x04: /* SMMLA */
+ case 0x14: /* UMMLA */
+ case 0x05: /* USMMLA */
+ if (!is_q || size != MO_32) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_i8mm, s);
+ break;
case 0x18: /* FCMLA, #0 */
case 0x19: /* FCMLA, #90 */
case 0x1a: /* FCMLA, #180 */
@@ -12183,10 +12227,23 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
return;
case 0x2: /* SDOT / UDOT */
- gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
return;
+ case 0x3: /* USDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
+ return;
+
+ case 0x04: /* SMMLA, UMMLA */
+ gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
+ u ? gen_helper_gvec_ummla_b
+ : gen_helper_gvec_smmla_b);
+ return;
+ case 0x05: /* USMMLA */
+ gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
+ return;
+
case 0x8: /* FCMLA, #0 */
case 0x9: /* FCMLA, #90 */
case 0xa: /* FCMLA, #180 */
@@ -12194,15 +12251,15 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
rot = extract32(opcode, 0, 2);
switch (size) {
case 1:
- gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
gen_helper_gvec_fcmlah);
break;
case 2:
- gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
gen_helper_gvec_fcmlas);
break;
case 3:
- gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
gen_helper_gvec_fcmlad);
break;
default:
@@ -13332,6 +13389,13 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
return;
}
break;
+ case 0x0f: /* SUDOT, USDOT */
+ if (is_scalar || (size & 1) || !dc_isar_feature(aa64_i8mm, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = MO_32;
+ break;
case 0x11: /* FCMLA #0 */
case 0x13: /* FCMLA #90 */
case 0x15: /* FCMLA #180 */
@@ -13442,10 +13506,17 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
switch (16 * u + opcode) {
case 0x0e: /* SDOT */
case 0x1e: /* UDOT */
- gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
u ? gen_helper_gvec_udot_idx_b
: gen_helper_gvec_sdot_idx_b);
return;
+ case 0x0f: /* SUDOT, USDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
+ extract32(insn, 23, 1)
+ ? gen_helper_gvec_usdot_idx_b
+ : gen_helper_gvec_sudot_idx_b);
+ return;
+
case 0x11: /* FCMLA #0 */
case 0x13: /* FCMLA #90 */
case 0x15: /* FCMLA #180 */
@@ -13453,9 +13524,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
int rot = extract32(insn, 13, 2);
int data = (index << 2) | rot;
- tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm), fpst,
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, rd), fpst,
is_q ? 16 : 8, vec_full_reg_size(s), data,
size == MO_64
? gen_helper_gvec_fcmlas_idx
@@ -14349,8 +14421,6 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
int imm6 = extract32(insn, 10, 6);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
- int pass;
if (!dc_isar_feature(aa64_sha3, s)) {
unallocated_encoding(s);
@@ -14361,25 +14431,10 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
return;
}
- tcg_op1 = tcg_temp_new_i64();
- tcg_op2 = tcg_temp_new_i64();
- tcg_res[0] = tcg_temp_new_i64();
- tcg_res[1] = tcg_temp_new_i64();
-
- for (pass = 0; pass < 2; pass++) {
- read_vec_element(s, tcg_op1, rn, pass, MO_64);
- read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
- tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
- tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
- }
- write_vec_element(s, tcg_res[0], rd, 0, MO_64);
- write_vec_element(s, tcg_res[1], rd, 1, MO_64);
-
- tcg_temp_free_i64(tcg_op1);
- tcg_temp_free_i64(tcg_op2);
- tcg_temp_free_i64(tcg_res[0]);
- tcg_temp_free_i64(tcg_res[1]);
+ gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), imm6, 16,
+ vec_full_reg_size(s));
}
/* Crypto three-reg imm2
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index 89437276e7..58f50abca4 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -120,5 +120,8 @@ bool disas_sve(DisasContext *, uint32_t);
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, int64_t shift,
+ uint32_t opr_sz, uint32_t max_sz);
#endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index 658bd275da..9e990b41ed 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -151,24 +151,51 @@ static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
}
}
-static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
+static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm,
+ int data, gen_helper_gvec_4 *fn_gvec)
{
- int opr_sz;
- TCGv_ptr fpst;
- gen_helper_gvec_3_ptr *fn_gvec_ptr;
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
+ return false;
+ }
- if (!dc_isar_feature(aa32_vcma, s)
- || (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) {
+ /*
+ * UNDEF accesses to odd registers for each bit of Q.
+ * Q is a mask of which operands must be Q-regs: 0b111 for all-Q
+ * instructions, a subset when we have mixed Q- and D-reg inputs.
+ */
+ if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
return false;
}
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ int opr_sz = q ? 16 : 8;
+ tcg_gen_gvec_4_ool(vfp_reg_offset(1, vd),
+ vfp_reg_offset(1, vn),
+ vfp_reg_offset(1, vm),
+ vfp_reg_offset(1, vd),
+ opr_sz, opr_sz, data, fn_gvec);
+ return true;
+}
+
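do_neon_ddda's odd-register test packs the low bit of each register index into a 3-bit value and masks it against q; the callers pass q == 7 when all three operands are Q-regs and q == 6 for the indexed forms, whose Vm is always a D-reg. The same predicate in plain C, as a sketch:

    #include <stdbool.h>

    /* A Q-reg is an even/odd D-reg pair, so a Q operand needs an even
     * index; each set bit of q (vd=4, vn=2, vm=1) marks a Q operand. */
    static bool undef_odd_qreg(int q, int vd, int vn, int vm)
    {
        return ((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q;
    }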
+static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm,
+ int data, ARMFPStatusFlavour fp_flavour,
+ gen_helper_gvec_4_ptr *fn_gvec_ptr)
+{
/* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vn | a->vm) & 0x10)) {
+ if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
return false;
}
- if ((a->vn | a->vm | a->vd) & a->q) {
+ /*
+ * UNDEF accesses to odd registers for each bit of Q.
+     * Q will be 0b111 when all three operands are Q-regs; other
+     * values occur for forms that mix Q- and D-reg inputs, with a
+     * bit set for each operand that must be a Q-reg.
+ */
+ if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
return false;
}
@@ -176,19 +203,34 @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
return true;
}
- opr_sz = (1 + a->q) * 8;
- fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
- fn_gvec_ptr = (a->size == MO_16) ?
- gen_helper_gvec_fcmlah : gen_helper_gvec_fcmlas;
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
- vfp_reg_offset(1, a->vn),
- vfp_reg_offset(1, a->vm),
- fpst, opr_sz, opr_sz, a->rot,
- fn_gvec_ptr);
+ int opr_sz = q ? 16 : 8;
+ TCGv_ptr fpst = fpstatus_ptr(fp_flavour);
+
+ tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
+ vfp_reg_offset(1, vn),
+ vfp_reg_offset(1, vm),
+ vfp_reg_offset(1, vd),
+ fpst, opr_sz, opr_sz, data, fn_gvec_ptr);
tcg_temp_free_ptr(fpst);
return true;
}
+static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
+{
+ if (!dc_isar_feature(aa32_vcma, s)) {
+ return false;
+ }
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
+ FPST_STD_F16, gen_helper_gvec_fcmlah);
+ }
+ return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
+ FPST_STD, gen_helper_gvec_fcmlas);
+}
+
static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
{
int opr_sz;
@@ -227,36 +269,31 @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
return true;
}
-static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
+static bool trans_VSDOT(DisasContext *s, arg_VSDOT *a)
{
- int opr_sz;
- gen_helper_gvec_3 *fn_gvec;
-
if (!dc_isar_feature(aa32_dp, s)) {
return false;
}
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_sdot_b);
+}
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vn | a->vm) & 0x10)) {
+static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a)
+{
+ if (!dc_isar_feature(aa32_dp, s)) {
return false;
}
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_udot_b);
+}
- if ((a->vn | a->vm | a->vd) & a->q) {
+static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
return false;
}
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- opr_sz = (1 + a->q) * 8;
- fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
- vfp_reg_offset(1, a->vn),
- vfp_reg_offset(1, a->vm),
- opr_sz, opr_sz, 0, fn_gvec);
- return true;
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_usdot_b);
}
static bool trans_VFML(DisasContext *s, arg_VFML *a)
@@ -292,77 +329,56 @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
{
- gen_helper_gvec_3_ptr *fn_gvec_ptr;
- int opr_sz;
- TCGv_ptr fpst;
+ int data = (a->index << 2) | a->rot;
if (!dc_isar_feature(aa32_vcma, s)) {
return false;
}
- if (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s)) {
- return false;
- }
-
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vn | a->vm) & 0x10)) {
- return false;
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
+ FPST_STD_F16, gen_helper_gvec_fcmlah_idx);
}
+ return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
+ FPST_STD, gen_helper_gvec_fcmlas_idx);
+}
- if ((a->vd | a->vn) & a->q) {
+static bool trans_VSDOT_scalar(DisasContext *s, arg_VSDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_dp, s)) {
return false;
}
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- fn_gvec_ptr = (a->size == MO_16) ?
- gen_helper_gvec_fcmlah_idx : gen_helper_gvec_fcmlas_idx;
- opr_sz = (1 + a->q) * 8;
- fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
- vfp_reg_offset(1, a->vn),
- vfp_reg_offset(1, a->vm),
- fpst, opr_sz, opr_sz,
- (a->index << 2) | a->rot, fn_gvec_ptr);
- tcg_temp_free_ptr(fpst);
- return true;
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_sdot_idx_b);
}
-static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
+static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a)
{
- gen_helper_gvec_3 *fn_gvec;
- int opr_sz;
- TCGv_ptr fpst;
-
if (!dc_isar_feature(aa32_dp, s)) {
return false;
}
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_udot_idx_b);
+}
- /* UNDEF accesses to D16-D31 if they don't exist. */
- if (!dc_isar_feature(aa32_simd_r32, s) &&
- ((a->vd | a->vn) & 0x10)) {
+static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
return false;
}
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_usdot_idx_b);
+}
- if ((a->vd | a->vn) & a->q) {
+static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
return false;
}
-
- if (!vfp_access_check(s)) {
- return true;
- }
-
- fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
- opr_sz = (1 + a->q) * 8;
- fpst = fpstatus_ptr(FPST_STD);
- tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
- vfp_reg_offset(1, a->vn),
- vfp_reg_offset(1, a->rm),
- opr_sz, opr_sz, a->index, fn_gvec);
- tcg_temp_free_ptr(fpst);
- return true;
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_sudot_idx_b);
}
static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
@@ -4020,3 +4036,30 @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a)
tcg_temp_free_i32(tmp2);
return true;
}
+
+static bool trans_VSMMLA(DisasContext *s, arg_VSMMLA *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_smmla_b);
+}
+
+static bool trans_VUMMLA(DisasContext *s, arg_VUMMLA *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_ummla_b);
+}
+
+static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_usmmla_b);
+}
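The three translators above are thin wrappers: the matrix multiplies are Q-reg-only, so q is hard-coded to 7. Per the Armv8.6 I8MM definition (a rough scalar model, not the vec_helper code), each source is a 2x8 matrix of bytes and the destination a 2x2 matrix of 32-bit accumulators; VUMMLA treats both sources as unsigned and VUSMMLA mixes signedness:

    #include <stdint.h>

    /* SMMLA sketch: d[r][c] += dot(row r of n, row c of m). */
    static void smmla(int32_t d[2][2],
                      const int8_t n[2][8], const int8_t m[2][8])
    {
        for (int r = 0; r < 2; r++) {
            for (int c = 0; c < 2; c++) {
                for (int k = 0; k < 8; k++) {
                    d[r][c] += n[r][k] * m[c][k];
                }
            }
        }
    }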
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 864ed669c4..9574efe957 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -163,6 +163,18 @@ static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
vsz, vsz, data, fn);
}
+/* Invoke an out-of-line helper on 4 Zregs. */
+static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
+ int rd, int rn, int rm, int ra, int data)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ vsz, vsz, data, fn);
+}
+
/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
int rd, int rn, int pg, int data)
@@ -205,6 +217,17 @@ static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
vec_full_reg_offset(s, rm), vsz, vsz);
}
+/* Invoke a vector expander on four Zregs. */
+static void gen_gvec_fn_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
+ int esz, int rd, int rn, int rm, int ra)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(esz, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra), vsz, vsz);
+}
+
/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
@@ -317,6 +340,312 @@ static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
return do_zzz_fn(s, a, tcg_gen_gvec_andc);
}
+static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ uint64_t mask = dup_const(MO_8, 0xff >> sh);
+
+ tcg_gen_xor_i64(t, n, m);
+ tcg_gen_shri_i64(d, t, sh);
+ tcg_gen_shli_i64(t, t, 8 - sh);
+ tcg_gen_andi_i64(d, d, mask);
+ tcg_gen_andi_i64(t, t, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ uint64_t mask = dup_const(MO_16, 0xffff >> sh);
+
+ tcg_gen_xor_i64(t, n, m);
+ tcg_gen_shri_i64(d, t, sh);
+ tcg_gen_shli_i64(t, t, 16 - sh);
+ tcg_gen_andi_i64(d, d, mask);
+ tcg_gen_andi_i64(t, t, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
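TCG has no per-byte or per-halfword rotate on a 64-bit integer, so gen_xar8_i64 and gen_xar16_i64 synthesize one from two full-width shifts and complementary lane masks built with dup_const. The equivalent scalar trick for byte lanes, as a sketch:

    #include <stdint.h>

    /* Rotate every byte lane of v right by sh (1..7); the masks
     * discard the bits that leaked across a lane boundary. */
    static uint64_t ror_bytes(uint64_t v, unsigned sh)
    {
        uint64_t mask = 0x0101010101010101ull * (0xffu >> sh);
        return ((v >> sh) & mask) | ((v << (8 - sh)) & ~mask);
    }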
+static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
+{
+ tcg_gen_xor_i32(d, n, m);
+ tcg_gen_rotri_i32(d, d, sh);
+}
+
+static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+ tcg_gen_xor_i64(d, n, m);
+ tcg_gen_rotri_i64(d, d, sh);
+}
+
+static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, int64_t sh)
+{
+ tcg_gen_xor_vec(vece, d, n, m);
+ tcg_gen_rotri_vec(vece, d, d, sh);
+}
+
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, int64_t shift,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
+ static const GVecGen3i ops[4] = {
+ { .fni8 = gen_xar8_i64,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_sve2_xar_b,
+ .opt_opc = vecop,
+ .vece = MO_8 },
+ { .fni8 = gen_xar16_i64,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_sve2_xar_h,
+ .opt_opc = vecop,
+ .vece = MO_16 },
+ { .fni4 = gen_xar_i32,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_sve2_xar_s,
+ .opt_opc = vecop,
+ .vece = MO_32 },
+ { .fni8 = gen_xar_i64,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_gvec_xar_d,
+ .opt_opc = vecop,
+ .vece = MO_64 }
+ };
+ int esize = 8 << vece;
+
+ /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
+ tcg_debug_assert(shift >= 0);
+ tcg_debug_assert(shift <= esize);
+ shift &= esize - 1;
+
+ if (shift == 0) {
+ /* xar with no rotate devolves to xor. */
+ tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
+ } else {
+ tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
+ shift, &ops[vece]);
+ }
+}
+
+static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
+{
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzzz(s, fn, a->esz, a->rd, a->rn, a->rm, a->ra);
+ }
+ return true;
+}
+
+static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_xor_i64(d, n, m);
+ tcg_gen_xor_i64(d, d, k);
+}
+
+static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ tcg_gen_xor_vec(vece, d, n, m);
+ tcg_gen_xor_vec(vece, d, d, k);
+}
+
+static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_eor3_i64,
+ .fniv = gen_eor3_vec,
+ .fno = gen_helper_sve2_eor3,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_EOR3(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_eor3);
+}
+
+static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_andc_i64(d, m, k);
+ tcg_gen_xor_i64(d, d, n);
+}
+
+static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ tcg_gen_andc_vec(vece, d, m, k);
+ tcg_gen_xor_vec(vece, d, d, n);
+}
+
+static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bcax_i64,
+ .fniv = gen_bcax_vec,
+ .fno = gen_helper_sve2_bcax,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BCAX(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bcax);
+}
+
+static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ /* BSL differs from the generic bitsel in argument ordering. */
+ tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
+}
+
+static bool trans_BSL(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bsl);
+}
+
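Note the argument ordering above: tcg_gen_gvec_bitsel takes the selector first, while the SVE2 trans function delivers the selector as the fourth register operand (a), hence gen_bsl's reordering. A scalar model of bitsel:

    #include <stdint.h>

    /* Bitwise select: where k has a 1 take the bit from n, else m. */
    static uint64_t bitsel64(uint64_t k, uint64_t n, uint64_t m)
    {
        return (k & n) | (~k & m);
    }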
+static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_andc_i64(n, k, n);
+ tcg_gen_andc_i64(m, m, k);
+ tcg_gen_or_i64(d, n, m);
+}
+
+static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ if (TCG_TARGET_HAS_bitsel_vec) {
+ tcg_gen_not_vec(vece, n, n);
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
+ } else {
+ tcg_gen_andc_vec(vece, n, k, n);
+ tcg_gen_andc_vec(vece, m, m, k);
+ tcg_gen_or_vec(vece, d, n, m);
+ }
+}
+
+static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bsl1n_i64,
+ .fniv = gen_bsl1n_vec,
+ .fno = gen_helper_sve2_bsl1n,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BSL1N(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bsl1n);
+}
+
+static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ /*
+ * Z[dn] = (n & k) | (~m & ~k)
+     *       = (n & k) | ~(m | k)
+ */
+ tcg_gen_and_i64(n, n, k);
+ if (TCG_TARGET_HAS_orc_i64) {
+ tcg_gen_or_i64(m, m, k);
+ tcg_gen_orc_i64(d, n, m);
+ } else {
+ tcg_gen_nor_i64(m, m, k);
+ tcg_gen_or_i64(d, n, m);
+ }
+}
+
+static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ if (TCG_TARGET_HAS_bitsel_vec) {
+ tcg_gen_not_vec(vece, m, m);
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
+ } else {
+ tcg_gen_and_vec(vece, n, n, k);
+ tcg_gen_or_vec(vece, m, m, k);
+ tcg_gen_orc_vec(vece, d, n, m);
+ }
+}
+
+static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bsl2n_i64,
+ .fniv = gen_bsl2n_vec,
+ .fno = gen_helper_sve2_bsl2n,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BSL2N(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bsl2n);
+}
+
+static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_and_i64(n, n, k);
+ tcg_gen_andc_i64(m, m, k);
+ tcg_gen_nor_i64(d, n, m);
+}
+
+static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
+ tcg_gen_not_vec(vece, d, d);
+}
+
+static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_nbsl_i64,
+ .fniv = gen_nbsl_vec,
+ .fno = gen_helper_sve2_nbsl,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_NBSL(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_nbsl);
+}
+
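All four variants above are the same bitwise select with different inputs complemented. With k the selector, the expansions compute:

    BSL:   (n & k) | (m & ~k)
    BSL1N: (~n & k) | (m & ~k)
    BSL2N: (n & k) | (~m & ~k)
    NBSL:  the complement of BSL

A scalar check of the NBSL expansion, as a sketch:

    #include <stdint.h>

    /* NBSL: complement of the bitwise select (n where k, else m). */
    static uint64_t nbsl64(uint64_t n, uint64_t m, uint64_t k)
    {
        return ~((n & k) | (m & ~k));
    }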
/*
*** SVE Integer Arithmetic - Unpredicated Group
*/
@@ -715,6 +1044,66 @@ static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a)
}
}
+static bool trans_SQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
+ gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
+ gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SRSHR(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
+ gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_URSHR(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
+ gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQSHLU(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
+ gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
/*
*** SVE Bitwise Shift - Predicated Group
*/
@@ -1937,18 +2326,18 @@ static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
*** SVE Permute Extract Group
*/
-static bool trans_EXT(DisasContext *s, arg_EXT *a)
+static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
{
if (!sve_access_check(s)) {
return true;
}
unsigned vsz = vec_full_reg_size(s);
- unsigned n_ofs = a->imm >= vsz ? 0 : a->imm;
+ unsigned n_ofs = imm >= vsz ? 0 : imm;
unsigned n_siz = vsz - n_ofs;
- unsigned d = vec_full_reg_offset(s, a->rd);
- unsigned n = vec_full_reg_offset(s, a->rn);
- unsigned m = vec_full_reg_offset(s, a->rm);
+ unsigned d = vec_full_reg_offset(s, rd);
+ unsigned n = vec_full_reg_offset(s, rn);
+ unsigned m = vec_full_reg_offset(s, rm);
/* Use host vector move insns if we have appropriate sizes
* and no unfortunate overlap.
@@ -1967,6 +2356,19 @@ static bool trans_EXT(DisasContext *s, arg_EXT *a)
return true;
}
+static bool trans_EXT(DisasContext *s, arg_EXT *a)
+{
+ return do_EXT(s, a->rd, a->rn, a->rm, a->imm);
+}
+
+static bool trans_EXT_sve2(DisasContext *s, arg_rri *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_EXT(s, a->rd, a->rn, (a->rn + 1) % 32, a->imm);
+}
+
/*
*** SVE Permute - Unpredicated Group
*/
@@ -2075,6 +2477,39 @@ static bool trans_TBL(DisasContext *s, arg_rrr_esz *a)
return true;
}
+static bool trans_TBL_sve2(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[4] = {
+ gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
+ gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn,
+ (a->rn + 1) % 32, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_TBX(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
+ gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
{
static gen_helper_gvec_2 * const fns[4][2] = {
@@ -2249,6 +2684,32 @@ static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a)
return do_zip(s, a, true);
}
+static bool do_zip_q(DisasContext *s, arg_rrr_esz *a, bool high)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned high_ofs = high ? QEMU_ALIGN_DOWN(vsz, 32) / 2 : 0;
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn) + high_ofs,
+ vec_full_reg_offset(s, a->rm) + high_ofs,
+ vsz, vsz, 0, gen_helper_sve2_zip_q);
+ }
+ return true;
+}
+
+static bool trans_ZIP1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip_q(s, a, false);
+}
+
+static bool trans_ZIP2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip_q(s, a, true);
+}
+
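do_zip_q interleaves whole quadwords rather than esz-sized elements; high_ofs selects the upper half of each source, rounded down to a whole number of quadword pairs. A byte-array sketch (assuming vsz is a multiple of 32 and the destination does not overlap the sources):

    #include <stdint.h>
    #include <string.h>

    static void zip_q(uint8_t *d, const uint8_t *n, const uint8_t *m,
                      unsigned vsz, int high)
    {
        unsigned pairs = vsz / 32;
        unsigned off = high ? pairs * 16 : 0;   /* vsz/2 when aligned */
        for (unsigned i = 0; i < pairs; i++) {
            memcpy(d + 32 * i,      n + off + 16 * i, 16);
            memcpy(d + 32 * i + 16, m + off + 16 * i, 16);
        }
    }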
static gen_helper_gvec_3 * const uzp_fns[4] = {
gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
@@ -2264,6 +2725,22 @@ static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a)
return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]);
}
+static bool trans_UZP1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 0, gen_helper_sve2_uzp_q);
+}
+
+static bool trans_UZP2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 16, gen_helper_sve2_uzp_q);
+}
+
static gen_helper_gvec_3 * const trn_fns[4] = {
gen_helper_sve_trn_b, gen_helper_sve_trn_h,
gen_helper_sve_trn_s, gen_helper_sve_trn_d,
@@ -2279,6 +2756,22 @@ static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a)
return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]);
}
+static bool trans_TRN1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 0, gen_helper_sve2_trn_q);
+}
+
+static bool trans_TRN2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 16, gen_helper_sve2_trn_q);
+}
+
/*
*** SVE Permute Vector - Predicated Group
*/
@@ -2684,6 +3177,18 @@ static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a)
return true;
}
+static bool trans_SPLICE_sve2(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
+ a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz);
+ }
+ return true;
+}
+
/*
*** SVE Integer Compare - Vectors Group
*/
@@ -3100,7 +3605,14 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
unsigned vsz = vec_full_reg_size(s);
unsigned desc = 0;
TCGCond cond;
+ uint64_t maxval;
+ /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
+ bool eq = a->eq == a->lt;
+ /* The greater-than conditions are all SVE2. */
+ if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
if (!sve_access_check(s)) {
return true;
}
@@ -3123,22 +3635,42 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
*/
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
- tcg_gen_sub_i64(t0, op1, op0);
+
+ if (a->lt) {
+ tcg_gen_sub_i64(t0, op1, op0);
+ if (a->u) {
+ maxval = a->sf ? UINT64_MAX : UINT32_MAX;
+ cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
+ } else {
+ maxval = a->sf ? INT64_MAX : INT32_MAX;
+ cond = eq ? TCG_COND_LE : TCG_COND_LT;
+ }
+ } else {
+ tcg_gen_sub_i64(t0, op0, op1);
+ if (a->u) {
+ maxval = 0;
+ cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
+ } else {
+ maxval = a->sf ? INT64_MIN : INT32_MIN;
+ cond = eq ? TCG_COND_GE : TCG_COND_GT;
+ }
+ }
tmax = tcg_const_i64(vsz >> a->esz);
- if (a->eq) {
+ if (eq) {
/* Equality means one more iteration. */
tcg_gen_addi_i64(t0, t0, 1);
- /* If op1 is max (un)signed integer (and the only time the addition
- * above could overflow), then we produce an all-true predicate by
- * setting the count to the vector length. This is because the
- * pseudocode is described as an increment + compare loop, and the
- * max integer would always compare true.
+ /*
+ * For the less-than while, if op1 is maxval (and the only time
+ * the addition above could overflow), then we produce an all-true
+ * predicate by setting the count to the vector length. This is
+ * because the pseudocode is described as an increment + compare
+ * loop, and the maximum integer would always compare true.
+ * Similarly, the greater-than while has the same issue with the
+ * minimum integer due to the decrement + compare loop.
*/
- tcg_gen_movi_i64(t1, (a->sf
- ? (a->u ? UINT64_MAX : INT64_MAX)
- : (a->u ? UINT32_MAX : INT32_MAX)));
+ tcg_gen_movi_i64(t1, maxval);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
}
@@ -3147,9 +3679,6 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
tcg_temp_free_i64(tmax);
/* Set the count to zero if the condition is false. */
- cond = (a->u
- ? (a->eq ? TCG_COND_LEU : TCG_COND_LTU)
- : (a->eq ? TCG_COND_LE : TCG_COND_LT));
tcg_gen_movi_i64(t1, 0);
tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
tcg_temp_free_i64(t1);
@@ -3169,7 +3698,78 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
ptr = tcg_temp_new_ptr();
tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
- gen_helper_sve_while(t2, ptr, t2, t3);
+ if (a->lt) {
+ gen_helper_sve_whilel(t2, ptr, t2, t3);
+ } else {
+ gen_helper_sve_whileg(t2, ptr, t2, t3);
+ }
+ do_pred_flags(t2);
+
+ tcg_temp_free_ptr(ptr);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ return true;
+}
+
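The rewrite above folds the SVE2 greater-than forms into the existing count computation: take the distance between the operands (op1 - op0 for the less-than forms, op0 - op1 otherwise), add one for the inclusive comparisons, clamp to the number of elements, and zero the count when the condition fails. A scalar model of the simplest case, WHILELO (unsigned, exclusive):

    #include <stdint.h>

    /* Number of active leading predicate elements for WHILELO. */
    static uint64_t whilelo_count(uint64_t op0, uint64_t op1,
                                  uint64_t elements)
    {
        if (op0 >= op1) {
            return 0;               /* condition false: all inactive */
        }
        uint64_t n = op1 - op0;     /* iterations until op0 == op1 */
        return n < elements ? n : elements;
    }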
+static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
+{
+ TCGv_i64 op0, op1, diff, t1, tmax;
+ TCGv_i32 t2, t3;
+ TCGv_ptr ptr;
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned desc = 0;
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ op0 = read_cpu_reg(s, a->rn, 1);
+ op1 = read_cpu_reg(s, a->rm, 1);
+
+ tmax = tcg_const_i64(vsz);
+ diff = tcg_temp_new_i64();
+
+ if (a->rw) {
+ /* WHILERW */
+ /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
+ t1 = tcg_temp_new_i64();
+ tcg_gen_sub_i64(diff, op0, op1);
+ tcg_gen_sub_i64(t1, op1, op0);
+ tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
+ tcg_temp_free_i64(t1);
+ /* Round down to a multiple of ESIZE. */
+ tcg_gen_andi_i64(diff, diff, -1 << a->esz);
+ /* If op1 == op0, diff == 0, and the condition is always true. */
+ tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
+ } else {
+ /* WHILEWR */
+ tcg_gen_sub_i64(diff, op1, op0);
+ /* Round down to a multiple of ESIZE. */
+ tcg_gen_andi_i64(diff, diff, -1 << a->esz);
+ /* If op0 >= op1, diff <= 0, the condition is always true. */
+ tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
+ }
+
+ /* Bound to the maximum. */
+ tcg_gen_umin_i64(diff, diff, tmax);
+ tcg_temp_free_i64(tmax);
+
+ /* Since we're bounded, pass as a 32-bit type. */
+ t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, diff);
+ tcg_temp_free_i64(diff);
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ t3 = tcg_const_i32(desc);
+
+ ptr = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+
+ gen_helper_sve_whilel(t2, ptr, t2, t3);
do_pred_flags(t2);
tcg_temp_free_ptr(ptr);
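trans_WHILE_ptr reuses the whilel helper: the predicate is all-true up to some byte count. For WHILEWR that count is the forward distance from the load pointer op0 to the store pointer op1, rounded down to the element size, going all-true when op0 >= op1 since there is then no overlap hazard. A scalar model of the count the code computes:

    #include <stdint.h>

    /* Active bytes for WHILEWR; esize is a power of two, vsz the
     * vector length in bytes. */
    static uint64_t whilewr_bytes(uint64_t op0, uint64_t op1,
                                  uint64_t esize, uint64_t vsz)
    {
        if (op0 >= op1) {
            return vsz;             /* no hazard: all true */
        }
        uint64_t diff = (op1 - op0) & ~(esize - 1);
        return diff < vsz ? diff : vsz;
    }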
@@ -3351,38 +3951,221 @@ DO_ZZI(UMIN, umin)
#undef DO_ZZI
-static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a)
+static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a)
{
- static gen_helper_gvec_3 * const fns[2][2] = {
+ static gen_helper_gvec_4 * const fns[2][2] = {
{ gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
{ gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
};
if (sve_access_check(s)) {
- gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, 0);
+ gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0);
}
return true;
}
-static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a)
+/*
+ * SVE Multiply - Indexed
+ */
+
+static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a,
+ gen_helper_gvec_4 *fn)
{
- static gen_helper_gvec_3 * const fns[2][2] = {
- { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h },
- { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h }
- };
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
+ }
+ return true;
+}
+
+#define DO_RRXR(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrxr_esz *a) \
+ { return do_zzxz_ool(s, a, FUNC); }
+DO_RRXR(trans_SDOT_zzxw_s, gen_helper_gvec_sdot_idx_b)
+DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h)
+DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b)
+DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)
+
+static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ return do_zzxz_ool(s, a, gen_helper_gvec_sudot_idx_b);
+}
+
+static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ return do_zzxz_ool(s, a, gen_helper_gvec_usdot_idx_b);
+}
+
+#undef DO_RRXR
+
+static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data,
+ gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vsz, vsz, data, fn);
+ }
+ return true;
+}
+
+#define DO_SVE2_RRX(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrx_esz *a) \
+ { return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, a->index, FUNC); }
+
+DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
+DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
+DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)
+
+DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
+DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
+DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
+
+DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
+
+#undef DO_SVE2_RRX
+
+#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
+ static bool NAME(DisasContext *s, arg_rrx_esz *a) \
+ { \
+ return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, \
+ (a->index << 1) | TOP, FUNC); \
+ }
+
+DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
+DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
+DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
+DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)
+
+DO_SVE2_RRX_TB(trans_SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
+DO_SVE2_RRX_TB(trans_SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
+DO_SVE2_RRX_TB(trans_SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
+DO_SVE2_RRX_TB(trans_SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)
+
+DO_SVE2_RRX_TB(trans_UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
+DO_SVE2_RRX_TB(trans_UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
+DO_SVE2_RRX_TB(trans_UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
+DO_SVE2_RRX_TB(trans_UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
+
+#undef DO_SVE2_RRX_TB
+
+static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra,
+ int data, gen_helper_gvec_4 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
if (sve_access_check(s)) {
- gen_gvec_ool_zzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->index);
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ vsz, vsz, data, fn);
}
return true;
}
+#define DO_SVE2_RRXR(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrxr_esz *a) \
+ { return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, a->index, FUNC); }
+
+DO_SVE2_RRXR(trans_MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
+DO_SVE2_RRXR(trans_MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
+DO_SVE2_RRXR(trans_MLA_zzxz_d, gen_helper_gvec_mla_idx_d)
+
+DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
+DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
+DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d)
+
+DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
+DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
+DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)
+
+DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
+DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
+DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)
+
+#undef DO_SVE2_RRXR
+
+#define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
+ static bool NAME(DisasContext *s, arg_rrxr_esz *a) \
+ { \
+ return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->rd, \
+ (a->index << 1) | TOP, FUNC); \
+ }
+
+DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
+DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
+DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
+DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
+DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
+DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
+DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)
+
+#undef DO_SVE2_RRXR_TB
+
+#define DO_SVE2_RRXR_ROT(NAME, FUNC) \
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
+ { \
+ return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, \
+ (a->index << 2) | a->rot, FUNC); \
+ }
+
+DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
+DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)
+
+DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
+DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)
+
+DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
+DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)
+
+#undef DO_SVE2_RRXR_ROT
/*
*** SVE Floating Point Multiply-Add Indexed Group
*/
-static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
+static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
{
static gen_helper_gvec_4_ptr * const fns[3] = {
gen_helper_gvec_fmla_idx_h,
@@ -3397,13 +4180,23 @@ static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
vec_full_reg_offset(s, a->rn),
vec_full_reg_offset(s, a->rm),
vec_full_reg_offset(s, a->ra),
- status, vsz, vsz, (a->index << 1) | a->sub,
+ status, vsz, vsz, (a->index << 1) | sub,
fns[a->esz - 1]);
tcg_temp_free_ptr(status);
}
return true;
}
+static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
+{
+ return do_FMLA_zzxz(s, a, false);
+}
+
+static bool trans_FMLS_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
+{
+ return do_FMLA_zzxz(s, a, true);
+}
+
/*
*** SVE Floating Point Multiply Indexed Group
*/
@@ -3933,7 +4726,7 @@ static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
{
- static gen_helper_gvec_3_ptr * const fns[2] = {
+ static gen_helper_gvec_4_ptr * const fns[2] = {
gen_helper_gvec_fcmlah_idx,
gen_helper_gvec_fcmlas_idx,
};
@@ -3943,9 +4736,10 @@ static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
if (sve_access_check(s)) {
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
status, vsz, vsz,
a->index * 4 + a->rot,
fns[a->esz - 1]);
@@ -4101,11 +4895,9 @@ static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a)
return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
}
-static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode)
+static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
+ int mode, gen_helper_gvec_3_ptr *fn)
{
- if (a->esz == 0) {
- return false;
- }
if (sve_access_check(s)) {
unsigned vsz = vec_full_reg_size(s);
TCGv_i32 tmode = tcg_const_i32(mode);
@@ -4116,7 +4908,7 @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode)
tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
pred_full_reg_offset(s, a->pg),
- status, vsz, vsz, 0, frint_fns[a->esz - 1]);
+ status, vsz, vsz, 0, fn);
gen_helper_set_rmode(tmode, tmode, status);
tcg_temp_free_i32(tmode);
@@ -4127,27 +4919,42 @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, int mode)
static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a)
{
- return do_frint_mode(s, a, float_round_nearest_even);
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_nearest_even, frint_fns[a->esz - 1]);
}
static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a)
{
- return do_frint_mode(s, a, float_round_up);
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_up, frint_fns[a->esz - 1]);
}
static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a)
{
- return do_frint_mode(s, a, float_round_down);
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_down, frint_fns[a->esz - 1]);
}
static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a)
{
- return do_frint_mode(s, a, float_round_to_zero);
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_to_zero, frint_fns[a->esz - 1]);
}
static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a)
{
- return do_frint_mode(s, a, float_round_ties_away);
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_ties_away, frint_fns[a->esz - 1]);
}
static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a)
@@ -4526,128 +5333,130 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
tcg_temp_free_i32(t_desc);
}
+/* Indexed by [mte][be][dtype][nreg] */
+static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
+ { /* mte inactive, little-endian */
+ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
+ gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
+ { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
+ gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
+ { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
+ gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
+ { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
+ gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
+
+ /* mte inactive, big-endian */
+ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
+ gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
+ { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
+ gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
+ { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
+ gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
+ { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
+ gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
+
+ { /* mte active, little-endian */
+ { { gen_helper_sve_ld1bb_r_mte,
+ gen_helper_sve_ld2bb_r_mte,
+ gen_helper_sve_ld3bb_r_mte,
+ gen_helper_sve_ld4bb_r_mte },
+ { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_le_r_mte,
+ gen_helper_sve_ld2hh_le_r_mte,
+ gen_helper_sve_ld3hh_le_r_mte,
+ gen_helper_sve_ld4hh_le_r_mte },
+ { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_le_r_mte,
+ gen_helper_sve_ld2ss_le_r_mte,
+ gen_helper_sve_ld3ss_le_r_mte,
+ gen_helper_sve_ld4ss_le_r_mte },
+ { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_le_r_mte,
+ gen_helper_sve_ld2dd_le_r_mte,
+ gen_helper_sve_ld3dd_le_r_mte,
+ gen_helper_sve_ld4dd_le_r_mte } },
+
+ /* mte active, big-endian */
+ { { gen_helper_sve_ld1bb_r_mte,
+ gen_helper_sve_ld2bb_r_mte,
+ gen_helper_sve_ld3bb_r_mte,
+ gen_helper_sve_ld4bb_r_mte },
+ { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_be_r_mte,
+ gen_helper_sve_ld2hh_be_r_mte,
+ gen_helper_sve_ld3hh_be_r_mte,
+ gen_helper_sve_ld4hh_be_r_mte },
+ { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_be_r_mte,
+ gen_helper_sve_ld2ss_be_r_mte,
+ gen_helper_sve_ld3ss_be_r_mte,
+ gen_helper_sve_ld4ss_be_r_mte },
+ { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_be_r_mte,
+ gen_helper_sve_ld2dd_be_r_mte,
+ gen_helper_sve_ld3dd_be_r_mte,
+ gen_helper_sve_ld4dd_be_r_mte } } },
+};
+
static void do_ld_zpa(DisasContext *s, int zt, int pg,
TCGv_i64 addr, int dtype, int nreg)
{
- static gen_helper_gvec_mem * const fns[2][2][16][4] = {
- { /* mte inactive, little-endian */
- { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
- gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
- { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
- gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
- { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
- gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
- { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
- gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
-
- /* mte inactive, big-endian */
- { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
- gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
- { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
- gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
- { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
- gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
- { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
- { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
- gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
-
- { /* mte active, little-endian */
- { { gen_helper_sve_ld1bb_r_mte,
- gen_helper_sve_ld2bb_r_mte,
- gen_helper_sve_ld3bb_r_mte,
- gen_helper_sve_ld4bb_r_mte },
- { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1hh_le_r_mte,
- gen_helper_sve_ld2hh_le_r_mte,
- gen_helper_sve_ld3hh_le_r_mte,
- gen_helper_sve_ld4hh_le_r_mte },
- { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1ss_le_r_mte,
- gen_helper_sve_ld2ss_le_r_mte,
- gen_helper_sve_ld3ss_le_r_mte,
- gen_helper_sve_ld4ss_le_r_mte },
- { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1dd_le_r_mte,
- gen_helper_sve_ld2dd_le_r_mte,
- gen_helper_sve_ld3dd_le_r_mte,
- gen_helper_sve_ld4dd_le_r_mte } },
-
- /* mte active, big-endian */
- { { gen_helper_sve_ld1bb_r_mte,
- gen_helper_sve_ld2bb_r_mte,
- gen_helper_sve_ld3bb_r_mte,
- gen_helper_sve_ld4bb_r_mte },
- { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1hh_be_r_mte,
- gen_helper_sve_ld2hh_be_r_mte,
- gen_helper_sve_ld3hh_be_r_mte,
- gen_helper_sve_ld4hh_be_r_mte },
- { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1ss_be_r_mte,
- gen_helper_sve_ld2ss_be_r_mte,
- gen_helper_sve_ld3ss_be_r_mte,
- gen_helper_sve_ld4ss_be_r_mte },
- { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
-
- { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
- { gen_helper_sve_ld1dd_be_r_mte,
- gen_helper_sve_ld2dd_be_r_mte,
- gen_helper_sve_ld3dd_be_r_mte,
- gen_helper_sve_ld4dd_be_r_mte } } },
- };
gen_helper_gvec_mem *fn
- = fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
+ = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
/*
* While there are holes in the table, they are not
@@ -4885,23 +5694,13 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
return true;
}
-static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
+static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
{
- static gen_helper_gvec_mem * const fns[2][4] = {
- { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_le_r,
- gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld1dd_le_r },
- { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_be_r,
- gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld1dd_be_r },
- };
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_pg;
- TCGv_i32 t_desc;
- int desc, poff;
+ int poff;
/* Load the first quadword using the normal predicated load helpers. */
- desc = simd_desc(16, 16, zt);
- t_desc = tcg_const_i32(desc);
-
poff = pred_full_reg_offset(s, pg);
if (vsz > 16) {
/*
@@ -4924,15 +5723,16 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
t_pg = tcg_temp_new_ptr();
tcg_gen_addi_ptr(t_pg, cpu_env, poff);
- fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, t_desc);
+ gen_helper_gvec_mem *fn
+ = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
+ fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
tcg_temp_free_ptr(t_pg);
- tcg_temp_free_i32(t_desc);
/* Replicate that first quadword. */
if (vsz > 16) {
- unsigned dofs = vec_full_reg_offset(s, zt);
- tcg_gen_gvec_dup_mem(4, dofs + 16, dofs, vsz - 16, vsz - 16);
+ int doff = vec_full_reg_offset(s, zt);
+ tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
}
}
@@ -4946,7 +5746,7 @@ static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
- do_ldrq(s, a->rd, a->pg, addr, msz);
+ do_ldrq(s, a->rd, a->pg, addr, a->dtype);
}
return true;
}
@@ -4956,7 +5756,100 @@ static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
if (sve_access_check(s)) {
TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
- do_ldrq(s, a->rd, a->pg, addr, dtype_msz(a->dtype));
+ do_ldrq(s, a->rd, a->pg, addr, a->dtype);
+ }
+ return true;
+}
+
+static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned vsz_r32;
+ TCGv_ptr t_pg;
+ int poff, doff;
+
+ if (vsz < 32) {
+ /*
+ * Note that this UNDEFINED check comes after CheckSVEEnabled()
+ * in the ARM pseudocode, which is the sve_access_check() done
+ * in our caller. We should not now return false from the caller.
+ */
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Load the first octaword using the normal predicated load helpers. */
+
+ poff = pred_full_reg_offset(s, pg);
+ if (vsz > 32) {
+ /*
+ * Zero-extend the first 32 bits of the predicate into a temporary.
+     * This avoids triggering the assertion that checks that no bits
+     * are set within a predicate beyond VQ, since we have lowered VQ
+     * to 2 for this load operation.
+ */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+#ifdef HOST_WORDS_BIGENDIAN
+ poff += 4;
+#endif
+ tcg_gen_ld32u_i64(tmp, cpu_env, poff);
+
+ poff = offsetof(CPUARMState, vfp.preg_tmp);
+ tcg_gen_st_i64(tmp, cpu_env, poff);
+ tcg_temp_free_i64(tmp);
+ }
+
+ t_pg = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+
+ gen_helper_gvec_mem *fn
+ = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
+ fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
+
+ tcg_temp_free_ptr(t_pg);
+
+ /*
+ * Replicate that first octaword.
+ * The replication happens in units of 32; if the full vector size
+     * is not a multiple of 32, the final bytes are zeroed.
+ */
+ doff = vec_full_reg_offset(s, zt);
+ vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
+ if (vsz >= 64) {
+ tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
+ }
+ vsz -= vsz_r32;
+ if (vsz) {
+ tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
+ }
+}
+
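The replication step after the load works in 32-byte units; when the vector length is not a multiple of 32, the leftover tail is zeroed rather than partially replicated. A scalar sketch (assuming vsz >= 32, as the UNDEF check above guarantees):

    #include <stdint.h>
    #include <string.h>

    static void replicate_octaword(uint8_t *zd, unsigned vsz)
    {
        unsigned vsz_r32 = vsz & ~31u;  /* QEMU_ALIGN_DOWN(vsz, 32) */
        for (unsigned off = 32; off < vsz_r32; off += 32) {
            memcpy(zd + off, zd, 32);   /* copy the loaded octaword */
        }
        memset(zd + vsz_r32, 0, vsz - vsz_r32);  /* zero the tail */
    }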
+static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (a->rm == 31) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ do_ldro(s, a->rd, a->pg, addr, a->dtype);
+ }
+ return true;
+}
+
+static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
+ do_ldro(s, a->rd, a->pg, addr, a->dtype);
}
return true;
}
@@ -5591,6 +6484,14 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
return true;
}
+static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return trans_LD1_zprz(s, a);
+}
+
/* Indexed by [mte][be][xs][msz]. */
static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
{ /* MTE Inactive */
@@ -5743,6 +6644,14 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
return true;
}
+static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return trans_ST1_zprz(s, a);
+}
+
/*
* Prefetches
*/
@@ -5795,3 +6704,1936 @@ static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a)
{
return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false);
}
+
+/*
+ * SVE2 Integer Multiply - Unpredicated
+ */
+
+static bool trans_MUL_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, tcg_gen_gvec_mul, a->esz, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool do_sve2_zzz_ool(DisasContext *s, arg_rrr_esz *a,
+ gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_SMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
+ gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
+ gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b);
+}
+
+static bool trans_SQDMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
+ gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQRDMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
+ gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+/*
+ * SVE2 Integer - Predicated
+ */
+
+static bool do_sve2_zpzz_ool(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzz_ool(s, a, fn);
+}
+
+static bool trans_SADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[3] = {
+ gen_helper_sve2_sadalp_zpzz_h,
+ gen_helper_sve2_sadalp_zpzz_s,
+ gen_helper_sve2_sadalp_zpzz_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
+}
+
+static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[3] = {
+ gen_helper_sve2_uadalp_zpzz_h,
+ gen_helper_sve2_uadalp_zpzz_s,
+ gen_helper_sve2_uadalp_zpzz_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
+}
+
+/*
+ * SVE2 integer unary operations (predicated)
+ */
+
+static bool do_sve2_zpz_ool(DisasContext *s, arg_rpr_esz *a,
+ gen_helper_gvec_3 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ool(s, a, fn);
+}
+
+static bool trans_URECPE(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz != 2) {
+ return false;
+ }
+ return do_sve2_zpz_ool(s, a, gen_helper_sve2_urecpe_s);
+}
+
+static bool trans_URSQRTE(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz != 2) {
+ return false;
+ }
+ return do_sve2_zpz_ool(s, a, gen_helper_sve2_ursqrte_s);
+}
+
+static bool trans_SQABS(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
+ gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
+ };
+ return do_sve2_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
+ gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
+ };
+ return do_sve2_zpz_ool(s, a, fns[a->esz]);
+}
+
+#define DO_SVE2_ZPZZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4 * const fns[4] = { \
+ gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h, \
+ gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d, \
+ }; \
+ return do_sve2_zpzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZPZZ(SQSHL, sqshl)
+DO_SVE2_ZPZZ(SQRSHL, sqrshl)
+DO_SVE2_ZPZZ(SRSHL, srshl)
+
+DO_SVE2_ZPZZ(UQSHL, uqshl)
+DO_SVE2_ZPZZ(UQRSHL, uqrshl)
+DO_SVE2_ZPZZ(URSHL, urshl)
+
+DO_SVE2_ZPZZ(SHADD, shadd)
+DO_SVE2_ZPZZ(SRHADD, srhadd)
+DO_SVE2_ZPZZ(SHSUB, shsub)
+
+DO_SVE2_ZPZZ(UHADD, uhadd)
+DO_SVE2_ZPZZ(URHADD, urhadd)
+DO_SVE2_ZPZZ(UHSUB, uhsub)
+
+DO_SVE2_ZPZZ(ADDP, addp)
+DO_SVE2_ZPZZ(SMAXP, smaxp)
+DO_SVE2_ZPZZ(UMAXP, umaxp)
+DO_SVE2_ZPZZ(SMINP, sminp)
+DO_SVE2_ZPZZ(UMINP, uminp)
+
+DO_SVE2_ZPZZ(SQADD_zpzz, sqadd)
+DO_SVE2_ZPZZ(UQADD_zpzz, uqadd)
+DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub)
+DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub)
+DO_SVE2_ZPZZ(SUQADD, suqadd)
+DO_SVE2_ZPZZ(USQADD, usqadd)
+
+/*
+ * SVE2 Widening Integer Arithmetic
+ */
+
+static bool do_sve2_zzw_ool(DisasContext *s, arg_rrr_esz *a,
+ gen_helper_gvec_3 *fn, int data)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vsz, vsz, data, fn);
+ }
+ return true;
+}
+
+#define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_h, \
+ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+ }; \
+ return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \
+}
+
+DO_SVE2_ZZZ_TB(SADDLB, saddl, false, false)
+DO_SVE2_ZZZ_TB(SSUBLB, ssubl, false, false)
+DO_SVE2_ZZZ_TB(SABDLB, sabdl, false, false)
+
+DO_SVE2_ZZZ_TB(UADDLB, uaddl, false, false)
+DO_SVE2_ZZZ_TB(USUBLB, usubl, false, false)
+DO_SVE2_ZZZ_TB(UABDLB, uabdl, false, false)
+
+DO_SVE2_ZZZ_TB(SADDLT, saddl, true, true)
+DO_SVE2_ZZZ_TB(SSUBLT, ssubl, true, true)
+DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true)
+
+DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true)
+DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true)
+DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true)
+
+DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true)
+DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true)
+DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false)
+
+DO_SVE2_ZZZ_TB(SQDMULLB_zzz, sqdmull_zzz, false, false)
+DO_SVE2_ZZZ_TB(SQDMULLT_zzz, sqdmull_zzz, true, true)
+
+DO_SVE2_ZZZ_TB(SMULLB_zzz, smull_zzz, false, false)
+DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true)
+
+DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false)
+DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true)
+
+static bool do_eor_tb(DisasContext *s, arg_rrr_esz *a, bool sel1)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
+ gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
+ };
+ return do_sve2_zzw_ool(s, a, fns[a->esz], (!sel1 << 1) | sel1);
+}
+
+static bool trans_EORBT(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_eor_tb(s, a, false);
+}
+
+static bool trans_EORTB(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_eor_tb(s, a, true);
+}
+
+static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
+ NULL, gen_helper_sve2_pmull_d,
+ };
+ if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_PMULLB(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_trans_pmull(s, a, false);
+}
+
+static bool trans_PMULLT(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_trans_pmull(s, a, true);
+}
+
+#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_h, \
+ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+ }; \
+ return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \
+}
+
+DO_SVE2_ZZZ_WTB(SADDWB, saddw, false)
+DO_SVE2_ZZZ_WTB(SADDWT, saddw, true)
+DO_SVE2_ZZZ_WTB(SSUBWB, ssubw, false)
+DO_SVE2_ZZZ_WTB(SSUBWT, ssubw, true)
+
+DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false)
+DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true)
+DO_SVE2_ZZZ_WTB(USUBWB, usubw, false)
+DO_SVE2_ZZZ_WTB(USUBWT, usubw, true)
+
+static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+ int top = imm & 1;
+ int shl = imm >> 1;
+ int halfbits = 4 << vece;
+
+ if (top) {
+ if (shl == halfbits) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_sari_vec(vece, d, n, halfbits);
+ tcg_gen_shli_vec(vece, d, d, shl);
+ }
+ } else {
+ tcg_gen_shli_vec(vece, d, n, halfbits);
+ tcg_gen_sari_vec(vece, d, d, halfbits - shl);
+ }
+}
+
+static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
+{
+ int halfbits = 4 << vece;
+ int top = imm & 1;
+ int shl = (imm >> 1);
+ int shift;
+ uint64_t mask;
+
+ mask = MAKE_64BIT_MASK(0, halfbits);
+ mask <<= shl;
+ mask = dup_const(vece, mask);
+
+ shift = shl - top * halfbits;
+ if (shift < 0) {
+ tcg_gen_shri_i64(d, n, -shift);
+ } else {
+ tcg_gen_shli_i64(d, n, shift);
+ }
+ tcg_gen_andi_i64(d, d, mask);
+}
+
+static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_16, d, n, imm);
+}
+
+static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_32, d, n, imm);
+}
+
+static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_64, d, n, imm);
+}
+
+static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+ int halfbits = 4 << vece;
+ int top = imm & 1;
+ int shl = imm >> 1;
+
+ if (top) {
+ if (shl == halfbits) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_shri_vec(vece, d, n, halfbits);
+ tcg_gen_shli_vec(vece, d, d, shl);
+ }
+ } else {
+ if (shl == 0) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_shli_vec(vece, d, n, halfbits);
+ tcg_gen_shri_vec(vece, d, d, halfbits - shl);
+ }
+ }
+}
+
+static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
+ bool sel, bool uns)
+{
+ static const TCGOpcode sshll_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, 0
+ };
+ static const TCGOpcode ushll_list[] = {
+ INDEX_op_shli_vec, INDEX_op_shri_vec, 0
+ };
+ static const GVecGen2i ops[2][3] = {
+ { { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_h,
+ .vece = MO_16 },
+ { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_s,
+ .vece = MO_32 },
+ { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_d,
+ .vece = MO_64 } },
+ { { .fni8 = gen_ushll16_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_h,
+ .vece = MO_16 },
+ { .fni8 = gen_ushll32_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_s,
+ .vece = MO_32 },
+ { .fni8 = gen_ushll64_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_d,
+ .vece = MO_64 } },
+ };
+
+ if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, (a->imm << 1) | sel,
+ &ops[uns][a->esz]);
+ }
+ return true;
+}
+
+static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, false, false);
+}
+
+static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, true, false);
+}
+
+static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, false, true);
+}
+
+static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, true, true);
+}
+
+static bool trans_BEXT(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
+ gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
+ };
+ if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_BDEP(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
+ gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
+ };
+ if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
+ gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
+ };
+ if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool do_cadd(DisasContext *s, arg_rrr_esz *a, bool sq, bool rot)
+{
+ static gen_helper_gvec_3 * const fns[2][4] = {
+ { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
+ gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d },
+ { gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
+ gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d },
+ };
+ return do_sve2_zzw_ool(s, a, fns[sq][a->esz], rot);
+}
+
+static bool trans_CADD_rot90(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, false, false);
+}
+
+static bool trans_CADD_rot270(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, false, true);
+}
+
+static bool trans_SQCADD_rot90(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, true, false);
+}
+
+static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, true, true);
+}
+
+static bool do_sve2_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
+ gen_helper_gvec_4 *fn, int data)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
+ }
+ return true;
+}
+
+static bool do_abal(DisasContext *s, arg_rrrr_esz *a, bool uns, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[2][4] = {
+ { NULL, gen_helper_sve2_sabal_h,
+ gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d },
+ { NULL, gen_helper_sve2_uabal_h,
+ gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d },
+ };
+ return do_sve2_zzzz_ool(s, a, fns[uns][a->esz], sel);
+}
+
+static bool trans_SABALB(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, false, false);
+}
+
+static bool trans_SABALT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, false, true);
+}
+
+static bool trans_UABALB(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, true, false);
+}
+
+static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, true, true);
+}
+
+static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[2] = {
+ gen_helper_sve2_adcl_s,
+ gen_helper_sve2_adcl_d,
+ };
+ /*
+ * Note that in this case the ESZ field encodes both size and sign.
+ * Split out 'subtract' into bit 1 of the data field for the helper.
+ */
+ return do_sve2_zzzz_ool(s, a, fns[a->esz & 1], (a->esz & 2) | sel);
+}
+
+static bool trans_ADCLB(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_adcl(s, a, false);
+}
+
+static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_adcl(s, a, true);
+}
+
+static bool do_sve2_fn2i(DisasContext *s, arg_rri_esz *a, GVecGen2iFn *fn)
+{
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned rd_ofs = vec_full_reg_offset(s, a->rd);
+ unsigned rn_ofs = vec_full_reg_offset(s, a->rn);
+ fn(a->esz, rd_ofs, rn_ofs, a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool trans_SSRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_ssra);
+}
+
+static bool trans_USRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_usra);
+}
+
+static bool trans_SRSRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_srsra);
+}
+
+static bool trans_URSRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_ursra);
+}
+
+static bool trans_SRI(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_sri);
+}
+
+static bool trans_SLI(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_sli);
+}
+
+static bool do_sve2_fn_zzz(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool trans_SABA(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sve2_fn_zzz(s, a, gen_gvec_saba);
+}
+
+static bool trans_UABA(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sve2_fn_zzz(s, a, gen_gvec_uaba);
+}
+
+static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
+ const GVecGen2 ops[3])
+{
+ if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
+ !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, &ops[a->esz]);
+ }
+ return true;
+}
+
+static const TCGOpcode sqxtn_list[] = {
+ INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t mask = (1ull << halfbits) - 1;
+ int64_t min = -1ull << (halfbits - 1);
+ int64_t max = -min - 1;
+
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, d, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, d, d, t);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_and_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtnb_vec,
+ .opt_opc = sqxtn_list,
+ .fno = gen_helper_sve2_sqxtnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtnb_vec,
+ .opt_opc = sqxtn_list,
+ .fno = gen_helper_sve2_sqxtnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtnb_vec,
+ .opt_opc = sqxtn_list,
+ .fno = gen_helper_sve2_sqxtnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t mask = (1ull << halfbits) - 1;
+ int64_t min = -1ull << (halfbits - 1);
+ int64_t max = -min - 1;
+
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtnt_vec,
+ .opt_opc = sqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtnt_vec,
+ .opt_opc = sqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtnt_vec,
+ .opt_opc = sqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode uqxtn_list[] = {
+ INDEX_op_shli_vec, INDEX_op_umin_vec, 0
+};
+
+static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_uqxtnb_vec,
+ .opt_opc = uqxtn_list,
+ .fno = gen_helper_sve2_uqxtnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqxtnb_vec,
+ .opt_opc = uqxtn_list,
+ .fno = gen_helper_sve2_uqxtnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqxtnb_vec,
+ .opt_opc = uqxtn_list,
+ .fno = gen_helper_sve2_uqxtnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_uqxtnt_vec,
+ .opt_opc = uqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqxtnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqxtnt_vec,
+ .opt_opc = uqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqxtnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqxtnt_vec,
+ .opt_opc = uqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqxtnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode sqxtun_list[] = {
+ INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, d, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtunb_vec,
+ .opt_opc = sqxtun_list,
+ .fno = gen_helper_sve2_sqxtunb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtunb_vec,
+ .opt_opc = sqxtun_list,
+ .fno = gen_helper_sve2_sqxtunb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtunb_vec,
+ .opt_opc = sqxtun_list,
+ .fno = gen_helper_sve2_sqxtunb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtunt_vec,
+ .opt_opc = sqxtun_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtunt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtunt_vec,
+ .opt_opc = sqxtun_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtunt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtunt_vec,
+ .opt_opc = sqxtun_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtunt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
+ const GVecGen2i ops[3])
+{
+ if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ assert(a->imm > 0 && a->imm <= (8 << a->esz));
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, a->imm, &ops[a->esz]);
+ }
+ return true;
+}
+
+static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
+{
+ int halfbits = 4 << vece;
+ uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
+
+ tcg_gen_shri_i64(d, n, shr);
+ tcg_gen_andi_i64(d, d, mask);
+}
+
+static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnb_i64(MO_16, d, n, shr);
+}
+
+static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnb_i64(MO_32, d, n, shr);
+}
+
+static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnb_i64(MO_64, d, n, shr);
+}
+
+static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
+ static const GVecGen2i ops[3] = {
+ { .fni8 = gen_shrnb16_i64,
+ .fniv = gen_shrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_shrnb_h,
+ .vece = MO_16 },
+ { .fni8 = gen_shrnb32_i64,
+ .fniv = gen_shrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_shrnb_s,
+ .vece = MO_32 },
+ { .fni8 = gen_shrnb64_i64,
+ .fniv = gen_shrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_shrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
+{
+ int halfbits = 4 << vece;
+ uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
+
+ tcg_gen_shli_i64(n, n, halfbits - shr);
+ tcg_gen_andi_i64(n, n, ~mask);
+ tcg_gen_andi_i64(d, d, mask);
+ tcg_gen_or_i64(d, d, n);
+}
+
+static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnt_i64(MO_16, d, n, shr);
+}
+
+static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnt_i64(MO_32, d, n, shr);
+}
+
+static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ tcg_gen_shri_i64(n, n, shr);
+ tcg_gen_deposit_i64(d, d, n, 32, 32);
+}
+
+static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
+
+ tcg_gen_shli_vec(vece, n, n, halfbits - shr);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
+ static const GVecGen2i ops[3] = {
+ { .fni8 = gen_shrnt16_i64,
+ .fniv = gen_shrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_shrnt_h,
+ .vece = MO_16 },
+ { .fni8 = gen_shrnt32_i64,
+ .fniv = gen_shrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_shrnt_s,
+ .vece = MO_32 },
+ { .fni8 = gen_shrnt64_i64,
+ .fniv = gen_shrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_shrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_rshrnb_h },
+ { .fno = gen_helper_sve2_rshrnb_s },
+ { .fno = gen_helper_sve2_rshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_rshrnt_h },
+ { .fno = gen_helper_sve2_rshrnt_s },
+ { .fno = gen_helper_sve2_rshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrunb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrunb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrunb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrunb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrunb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrunb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_smax_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrunt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrunt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrunt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrunt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrunt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrunt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrunb_h },
+ { .fno = gen_helper_sve2_sqrshrunb_s },
+ { .fno = gen_helper_sve2_sqrshrunb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrunt_h },
+ { .fno = gen_helper_sve2_sqrshrunt_s },
+ { .fno = gen_helper_sve2_sqrshrunt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
+ int64_t min = -max - 1;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
+ int64_t min = -max - 1;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrnb_h },
+ { .fno = gen_helper_sve2_sqrshrnb_s },
+ { .fno = gen_helper_sve2_sqrshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrnt_h },
+ { .fno = gen_helper_sve2_sqrshrnt_s },
+ { .fno = gen_helper_sve2_sqrshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_uqrshrnb_h },
+ { .fno = gen_helper_sve2_uqrshrnb_s },
+ { .fno = gen_helper_sve2_uqrshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_uqrshrnt_h },
+ { .fno = gen_helper_sve2_uqrshrnt_s },
+ { .fno = gen_helper_sve2_uqrshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+#define DO_SVE2_ZZZ_NARROW(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_h, \
+ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+ }; \
+ return do_sve2_zzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
+DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
+DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
+DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
+
+DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
+DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
+DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
+DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
+
+static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_flags_4 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_ppzz_flags(s, a, fn);
+}
+
+#define DO_SVE2_PPZZ_MATCH(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_flags_4 * const fns[4] = { \
+ gen_helper_sve2_##name##_ppzz_b, gen_helper_sve2_##name##_ppzz_h, \
+ NULL, NULL \
+ }; \
+ return do_sve2_ppzz_flags(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_PPZZ_MATCH(MATCH, match)
+DO_SVE2_PPZZ_MATCH(NMATCH, nmatch)
+
+static bool trans_HISTCNT(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[2] = {
+ gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
+ };
+ if (a->esz < 2) {
+ return false;
+ }
+ return do_sve2_zpzz_ool(s, a, fns[a->esz - 2]);
+}
+
+static bool trans_HISTSEG(DisasContext *s, arg_rrr_esz *a)
+{
+ if (a->esz != 0) {
+ return false;
+ }
+ return do_sve2_zzz_ool(s, a, gen_helper_sve2_histseg);
+}
+
+static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzz_fp(s, a, fn);
+}
+
+#define DO_SVE2_ZPZZ_FP(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_zpzz_h, \
+ gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
+ }; \
+ return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZPZZ_FP(FADDP, faddp)
+DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
+DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
+DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
+DO_SVE2_ZPZZ_FP(FMINP, fminp)
+
+/*
+ * SVE Integer Multiply-Add (unpredicated)
+ */
+
+static bool trans_FMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ gen_helper_gvec_4_ptr *fn;
+
+ switch (a->esz) {
+ case MO_32:
+ if (!dc_isar_feature(aa64_sve_f32mm, s)) {
+ return false;
+ }
+ fn = gen_helper_fmmla_s;
+ break;
+ case MO_64:
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ fn = gen_helper_fmmla_d;
+ break;
+ default:
+ return false;
+ }
+
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool do_sqdmlal_zzzw(DisasContext *s, arg_rrrr_esz *a,
+ bool sel1, bool sel2)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_sqdmlal_zzzw_h,
+ gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1);
+}
+
+static bool do_sqdmlsl_zzzw(DisasContext *s, arg_rrrr_esz *a,
+ bool sel1, bool sel2)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
+ gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1);
+}
+
+static bool trans_SQDMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlal_zzzw(s, a, false, false);
+}
+
+static bool trans_SQDMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlal_zzzw(s, a, true, true);
+}
+
+static bool trans_SQDMLALBT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlal_zzzw(s, a, false, true);
+}
+
+static bool trans_SQDMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlsl_zzzw(s, a, false, false);
+}
+
+static bool trans_SQDMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlsl_zzzw(s, a, true, true);
+}
+
+static bool trans_SQDMLSLBT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlsl_zzzw(s, a, false, true);
+}
+
+static bool trans_SQRDMLAH_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
+ gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
+ gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
+}
+
+static bool do_smlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_smlal_zzzw_h,
+ gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_SMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlal_zzzw(s, a, false);
+}
+
+static bool trans_SMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlal_zzzw(s, a, true);
+}
+
+static bool do_umlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_umlal_zzzw_h,
+ gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_UMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlal_zzzw(s, a, false);
+}
+
+static bool trans_UMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlal_zzzw(s, a, true);
+}
+
+static bool do_smlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_smlsl_zzzw_h,
+ gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_SMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlsl_zzzw(s, a, false);
+}
+
+static bool trans_SMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlsl_zzzw(s, a, true);
+}
+
+static bool do_umlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_umlsl_zzzw_h,
+ gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_UMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlsl_zzzw(s, a, false);
+}
+
+static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlsl_zzzw(s, a, true);
+}
+
+static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
+ gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+ }
+ return true;
+}
+
+static bool trans_CDOT_zzzz(DisasContext *s, arg_CMLA_zzzz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s) || a->esz < MO_32) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_helper_gvec_4 *fn = (a->esz == MO_32
+ ? gen_helper_sve2_cdot_zzzz_s
+ : gen_helper_sve2_cdot_zzzz_d);
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->rot);
+ }
+ return true;
+}
+
+static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
+ gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+ }
+ return true;
+}
+
+static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a)
+{
+ if (a->esz != 2 || !dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ vsz, vsz, 0, gen_helper_gvec_usdot_b);
+ }
+ return true;
+}
+
+static bool trans_AESMC(DisasContext *s, arg_AESMC *a)
+{
+ if (!dc_isar_feature(aa64_sve2_aes, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zz(s, gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt);
+ }
+ return true;
+}
+
+static bool do_aese(DisasContext *s, arg_rrr_esz *a, bool decrypt)
+{
+ if (!dc_isar_feature(aa64_sve2_aes, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, gen_helper_crypto_aese,
+ a->rd, a->rn, a->rm, decrypt);
+ }
+ return true;
+}
+
+static bool trans_AESE(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_aese(s, a, false);
+}
+
+static bool trans_AESD(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_aese(s, a, true);
+}
+
+static bool do_sm4(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2_sm4, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_SM4E(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sm4(s, a, gen_helper_crypto_sm4e);
+}
+
+static bool trans_SM4EKEY(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sm4(s, a, gen_helper_crypto_sm4ekey);
+}
+
+static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2_sha3, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, gen_gvec_rax1, MO_64, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
+}
+
+static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds);
+}
+
+static bool trans_FCVTLT_hs(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_hs);
+}
+
+static bool trans_FCVTLT_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_sd);
+}
+
+static bool trans_FCVTX_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve_fcvt_ds);
+}
+
+static bool trans_FCVTXNT_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve2_fcvtnt_ds);
+}
+
+static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[] = {
+ NULL, gen_helper_flogb_h,
+ gen_helper_flogb_s, gen_helper_flogb_d
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s) || fns[a->esz] == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_ptr status =
+ fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ unsigned vsz = vec_full_reg_size(s);
+
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fns[a->esz]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ cpu_env, vsz, vsz, (sel << 1) | sub,
+ gen_helper_sve2_fmlal_zzzw_s);
+ }
+ return true;
+}
+
+static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, true, true);
+}
+
+static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ cpu_env, vsz, vsz,
+ (a->index << 2) | (sel << 1) | sub,
+ gen_helper_sve2_fmlal_zzxw_s);
+ }
+ return true;
+}
+
+static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, true, true);
+}
+
+static bool do_i8mm_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
+ gen_helper_gvec_4 *fn, int data)
+{
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
+ }
+ return true;
+}
+
+static bool trans_SMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_smmla_b, 0);
+}
+
+static bool trans_USMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_usmmla_b, 0);
+}
+
+static bool trans_UMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_ummla_b, 0);
+}
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 3fbeae87cb..e84b438340 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -22,33 +22,82 @@
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
+#include "qemu/int128.h"
#include "vec_internal.h"
-/* Note that vector data is stored in host-endian 64-bit chunks,
- so addressing units smaller than that needs a host-endian fixup. */
-#ifdef HOST_WORDS_BIGENDIAN
-#define H1(x) ((x) ^ 7)
-#define H2(x) ((x) ^ 3)
-#define H4(x) ((x) ^ 1)
-#else
-#define H1(x) (x)
-#define H2(x) (x)
-#define H4(x) (x)
-#endif
-
-/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
-static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
- bool neg, bool round, uint32_t *sat)
+/* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */
+int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
+ bool neg, bool round)
{
/*
* Simplify:
- * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
- * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
+ * = ((a3 << 8) + ((e1 * e2) << 1) + (round << 7)) >> 8
+ * = ((a3 << 7) + (e1 * e2) + (round << 6)) >> 7
*/
int32_t ret = (int32_t)src1 * src2;
if (neg) {
ret = -ret;
}
+ ret += ((int32_t)src3 << 7) + (round << 6);
+ ret >>= 7;
+
+ if (ret != (int8_t)ret) {
+ ret = (ret < 0 ? INT8_MIN : INT8_MAX);
+ }
+ return ret;
+}
+
+void HELPER(sve2_sqrdmlah_b)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], a[i], false, true);
+ }
+}
+
+void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], a[i], true, true);
+ }
+}
+
+void HELPER(sve2_sqdmulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], 0, false, false);
+ }
+}
+
+void HELPER(sve2_sqrdmulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], 0, false, true);
+ }
+}
+
+/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
+int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
+ bool neg, bool round, uint32_t *sat)
+{
+ /* Simplify similarly to do_sqrdmlah_b above. */
+ int32_t ret = (int32_t)src1 * src2;
+ if (neg) {
+ ret = -ret;
+ }
ret += ((int32_t)src3 << 15) + (round << 14);
ret >>= 15;
@@ -133,11 +182,87 @@ void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
+void HELPER(sve2_sqrdmlah_h)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], a[i], false, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], a[i], true, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, &discard);
+ }
+}
+
+void HELPER(sve2_sqrdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, false, &discard);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, true, &discard);
+ }
+ }
+}
+
/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
-static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
- bool neg, bool round, uint32_t *sat)
+int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
+ bool neg, bool round, uint32_t *sat)
{
- /* Simplify similarly to int_qrdmlah_s16 above. */
+ /* Simplify similarly to do_sqrdmlah_b above. */
int64_t ret = (int64_t)src1 * src2;
if (neg) {
ret = -ret;
@@ -220,197 +345,253 @@ void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-/* Integer 8 and 16-bit dot-product.
- *
- * Note that for the loops herein, host endianness does not matter
- * with respect to the ordering of data within the 64-bit lanes.
- * All elements are treated equally, no matter where they are.
- */
-
-void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqrdmlah_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
- uint32_t *d = vd;
- int8_t *n = vn, *m = vm;
+ int32_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
for (i = 0; i < opr_sz / 4; ++i) {
- d[i] += n[i * 4 + 0] * m[i * 4 + 0]
- + n[i * 4 + 1] * m[i * 4 + 1]
- + n[i * 4 + 2] * m[i * 4 + 2]
- + n[i * 4 + 3] * m[i * 4 + 3];
+ d[i] = do_sqrdmlah_s(n[i], m[i], a[i], false, true, &discard);
}
- clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqrdmlsh_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
- uint32_t *d = vd;
- uint8_t *n = vn, *m = vm;
+ int32_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
for (i = 0; i < opr_sz / 4; ++i) {
- d[i] += n[i * 4 + 0] * m[i * 4 + 0]
- + n[i * 4 + 1] * m[i * 4 + 1]
- + n[i * 4 + 2] * m[i * 4 + 2]
- + n[i * 4 + 3] * m[i * 4 + 3];
+ d[i] = do_sqrdmlah_s(n[i], m[i], a[i], true, true, &discard);
}
- clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
- uint64_t *d = vd;
- int16_t *n = vn, *m = vm;
+ int32_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
- for (i = 0; i < opr_sz / 8; ++i) {
- d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
- + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
- + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
- + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, &discard);
}
- clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqrdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
- uint64_t *d = vd;
- uint16_t *n = vn, *m = vm;
+ int32_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
- for (i = 0; i < opr_sz / 8; ++i) {
- d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
- + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
- + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
- + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, &discard);
}
- clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
{
- intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
- intptr_t index = simd_data(desc);
- uint32_t *d = vd;
- int8_t *n = vn;
- int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, false, &discard);
+ }
+ }
+}
- /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
- * Otherwise opr_sz is a multiple of 16.
- */
- segend = MIN(4, opr_sz_4);
- i = 0;
- do {
- int8_t m0 = m_indexed[i * 4 + 0];
- int8_t m1 = m_indexed[i * 4 + 1];
- int8_t m2 = m_indexed[i * 4 + 2];
- int8_t m3 = m_indexed[i * 4 + 3];
-
- do {
- d[i] += n[i * 4 + 0] * m0
- + n[i * 4 + 1] * m1
- + n[i * 4 + 2] * m2
- + n[i * 4 + 3] * m3;
- } while (++i < segend);
- segend = i + 4;
- } while (i < opr_sz_4);
+void HELPER(sve2_sqrdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, true, &discard);
+ }
+ }
+}
- clear_tail(d, opr_sz, simd_maxsz(desc));
+/* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */
+static int64_t do_sat128_d(Int128 r)
+{
+ int64_t ls = int128_getlo(r);
+ int64_t hs = int128_gethi(r);
+
+ if (unlikely(hs != (ls >> 63))) {
+ return hs < 0 ? INT64_MIN : INT64_MAX;
+ }
+ return ls;
}
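+
+/*
+ * Worked example (illustrative): r = 2^63 has hs = 0 but ls = INT64_MIN,
+ * so ls >> 63 = -1 != hs and the value saturates to INT64_MAX; a small
+ * negative r such as -5 has hs = -1 == ls >> 63 and is returned as-is.
+ */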
-void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
+int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, bool neg, bool round)
{
- intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
- intptr_t index = simd_data(desc);
- uint32_t *d = vd;
- uint8_t *n = vn;
- uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;
+ uint64_t l, h;
+ Int128 r, t;
- /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
- * Otherwise opr_sz is a multiple of 16.
- */
- segend = MIN(4, opr_sz_4);
- i = 0;
- do {
- uint8_t m0 = m_indexed[i * 4 + 0];
- uint8_t m1 = m_indexed[i * 4 + 1];
- uint8_t m2 = m_indexed[i * 4 + 2];
- uint8_t m3 = m_indexed[i * 4 + 3];
-
- do {
- d[i] += n[i * 4 + 0] * m0
- + n[i * 4 + 1] * m1
- + n[i * 4 + 2] * m2
- + n[i * 4 + 3] * m3;
- } while (++i < segend);
- segend = i + 4;
- } while (i < opr_sz_4);
+ /* As in do_sqrdmlah_b, but with 128-bit arithmetic. */
+ muls64(&l, &h, m, n);
+ r = int128_make128(l, h);
+ if (neg) {
+ r = int128_neg(r);
+ }
+ if (a) {
+ t = int128_exts64(a);
+ t = int128_lshift(t, 63);
+ r = int128_add(r, t);
+ }
+ if (round) {
+ t = int128_exts64(1ll << 62);
+ r = int128_add(r, t);
+ }
+ r = int128_rshift(r, 63);
- clear_tail(d, opr_sz, simd_maxsz(desc));
+ return do_sat128_d(r);
}
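+
+/*
+ * For reference: the doubling in SQRDMLAH is folded into the final shift,
+ * since (2 * n * m) >> 64 == (n * m) >> 63; hence the accumulator is
+ * aligned with a left shift of 63 and the rounding constant is 2^62.
+ */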
-void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqrdmlah_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
{
- intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
- intptr_t index = simd_data(desc);
- uint64_t *d = vd;
- int16_t *n = vn;
- int16_t *m_indexed = (int16_t *)vm + index * 4;
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm, *a = va;
- /* This is supported by SVE only, so opr_sz is always a multiple of 16.
- * Process the entire segment all at once, writing back the results
- * only after we've consumed all of the inputs.
- */
- for (i = 0; i < opr_sz_8 ; i += 2) {
- uint64_t d0, d1;
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], a[i], false, true);
+ }
+}
- d0 = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
- d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
- d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
- d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
- d1 = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
- d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
- d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
- d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
+void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm, *a = va;
- d[i + 0] += d0;
- d[i + 1] += d1;
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], a[i], true, true);
}
+}
- clear_tail(d, opr_sz, simd_maxsz(desc));
+void HELPER(sve2_sqdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], 0, false, false);
+ }
}
-void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+void HELPER(sve2_sqrdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
{
- intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
- intptr_t index = simd_data(desc);
- uint64_t *d = vd;
- uint16_t *n = vn;
- uint16_t *m_indexed = (uint16_t *)vm + index * 4;
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm;
- /* This is supported by SVE only, so opr_sz is always a multiple of 16.
- * Process the entire segment all at once, writing back the results
- * only after we've consumed all of the inputs.
- */
- for (i = 0; i < opr_sz_8 ; i += 2) {
- uint64_t d0, d1;
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], 0, false, true);
+ }
+}
- d0 = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
- d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
- d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
- d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
- d1 = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
- d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
- d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
- d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
+void HELPER(sve2_sqdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
- d[i + 0] += d0;
- d[i + 1] += d1;
+ for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+ int64_t mm = m[i];
+ for (j = 0; j < 16 / 8; ++j) {
+ d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, false);
+ }
}
+}
- clear_tail(d, opr_sz, simd_maxsz(desc));
+void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+ for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+ int64_t mm = m[i];
+ for (j = 0; j < 16 / 8; ++j) {
+ d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, true);
+ }
+ }
}
+/* Integer 8 and 16-bit dot-product.
+ *
+ * Note that for the loops herein, host endianness does not matter
+ * with respect to the ordering of data within the quad-width lanes.
+ * All elements are treated equally, no matter where they are.
+ */
+
+#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m = vm; \
+ for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \
+ d[i] = (a[i] + \
+ (TYPED)n[i * 4 + 0] * m[i * 4 + 0] + \
+ (TYPED)n[i * 4 + 1] * m[i * 4 + 1] + \
+ (TYPED)n[i * 4 + 2] * m[i * 4 + 2] + \
+ (TYPED)n[i * 4 + 3] * m[i * 4 + 3]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t)
+DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
+DO_DOT(gvec_usdot_b, uint32_t, uint8_t, int8_t)
+DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
+DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)
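+
+/*
+ * Illustrative expansion: gvec_sdot_b computes, per 32-bit lane,
+ *   d[i] = a[i] + n[4i]*m[4i] + n[4i+1]*m[4i+1]
+ *               + n[4i+2]*m[4i+2] + n[4i+3]*m[4i+3]
+ * with each byte product evaluated at the widened TYPED precision.
+ */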
+
+#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i = 0, opr_sz = simd_oprsz(desc); \
+ intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \
+ intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \
+ intptr_t index = simd_data(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m_indexed = (TYPEM *)vm + HD(index) * 4; \
+ do { \
+ TYPED m0 = m_indexed[i * 4 + 0]; \
+ TYPED m1 = m_indexed[i * 4 + 1]; \
+ TYPED m2 = m_indexed[i * 4 + 2]; \
+ TYPED m3 = m_indexed[i * 4 + 3]; \
+ do { \
+ d[i] = (a[i] + \
+ n[i * 4 + 0] * m0 + \
+ n[i * 4 + 1] * m1 + \
+ n[i * 4 + 2] * m2 + \
+ n[i * 4 + 3] * m3); \
+ } while (++i < segend); \
+ segend = i + 4; \
+ } while (i < opr_sz_n); \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4)
+DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_sudot_idx_b, int32_t, int8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_usdot_idx_b, int32_t, uint8_t, int8_t, H4)
+DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, )
+DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, )
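+
+/*
+ * For reference: the _idx forms load the four m values once per 128-bit
+ * segment and reuse them for every lane of that segment; the MIN() in
+ * segend covers the 8-byte advsimd case, where opr_sz is not a multiple
+ * of 16.
+ */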
+
void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
void *vfpst, uint32_t desc)
{
@@ -495,13 +676,11 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float16 *d = vd;
- float16 *n = vn;
- float16 *m = vm;
+ float16 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -518,19 +697,17 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
float16 e4 = e2;
float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
- d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
- d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
+ d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
+ d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float16 *d = vd;
- float16 *n = vn;
- float16 *m = vm;
+ float16 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -554,20 +731,18 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
float16 e2 = n[H2(j + flip)];
float16 e4 = e2;
- d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
- d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
+ d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
+ d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float32 *d = vd;
- float32 *n = vn;
- float32 *m = vm;
+ float32 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -584,19 +759,17 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
float32 e4 = e2;
float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
- d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
- d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
+ d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
+ d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float32 *d = vd;
- float32 *n = vn;
- float32 *m = vm;
+ float32 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -620,20 +793,18 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
float32 e2 = n[H4(j + flip)];
float32 e4 = e2;
- d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
- d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
+ d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
+ d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float64 *d = vd;
- float64 *n = vn;
- float64 *m = vm;
+ float64 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -650,8 +821,8 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
float64 e4 = e2;
float64 e3 = m[i + 1 - flip] ^ neg_imag;
- d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
- d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
+ d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
+ d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
@@ -1497,6 +1668,27 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}
+void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
+ void *venv, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+ CPUARMState *env = venv;
+ float_status *status = &env->vfp.fp_status;
+ bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+ for (i = 0; i < oprsz; i += sizeof(float32)) {
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn;
+ float16 mm_16 = *(float16 *)(vm + H1_2(i + sel));
+ float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+ float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+ float32 aa = *(float32 *)(va + H1_4(i));
+
+ *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+ }
+}
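+
+/*
+ * Note for reference: in the helper above, sel selects the even (0) or
+ * odd (1) float16 half of each float32 lane, and negn flips the float16
+ * sign bit to give the subtracting (FMLSL) form of the same operation.
+ */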
+
static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
uint32_t desc, bool fz16)
{
@@ -1541,6 +1733,32 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}
+void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
+ void *venv, uint32_t desc)
+{
+ intptr_t i, j, oprsz = simd_oprsz(desc);
+ uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
+ CPUARMState *env = venv;
+ float_status *status = &env->vfp.fp_status;
+ bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+ for (i = 0; i < oprsz; i += 16) {
+ float16 mm_16 = *(float16 *)(vm + i + idx);
+ float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+
+ for (j = 0; j < 16; j += sizeof(float32)) {
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn;
+ float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+ float32 aa = *(float32 *)(va + H1_4(i + j));
+
+ *(float32 *)(vd + H1_4(i + j)) =
+ float32_muladd(nn, mm, aa, 0, status);
+ }
+ }
+}
+
void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc);
@@ -1750,6 +1968,30 @@ void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
d[i] = pmull_h(nn, mm);
}
}
+
+static uint64_t pmull_d(uint64_t op1, uint64_t op2)
+{
+ uint64_t result = 0;
+ int i;
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t mask = -((op1 >> i) & 1);
+ result ^= (op2 << i) & mask;
+ }
+ return result;
+}
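+
+/*
+ * Worked example (illustrative): pmull_d(5, 3) is the carry-less product
+ * 0b101 x 0b11: result = 3 ^ (3 << 2) = 0b1111 = 15, XOR standing in for
+ * addition so that no carries propagate between bit positions.
+ */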
+
+void HELPER(sve2_pmull_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t sel = H4(simd_data(desc));
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *n = vn, *m = vm;
+ uint64_t *d = vd;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = pmull_d(n[2 * i + sel], m[2 * i + sel]);
+ }
+}
#endif
#define DO_CMP0(NAME, TYPE, OP) \
@@ -1985,3 +2227,188 @@ void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc)
clear_tail(vd, oprsz, simd_maxsz(desc));
}
#endif
+
+/*
+ * NxN -> N highpart multiply
+ *
+ * TODO: expose this as a generic vector operation.
+ */
+
+void HELPER(gvec_smulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ((int32_t)n[i] * m[i]) >> 8;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = ((int32_t)n[i] * m[i]) >> 16;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = ((int64_t)n[i] * m[i]) >> 32;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t discard;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ muls64(&discard, &d[i], n[i], m[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
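+
+/*
+ * Worked example (illustrative): in gvec_smulh_s, n[i] = 0x40000000 and
+ * m[i] = 4 give the 64-bit product 2^32, so the stored high half is 1.
+ * The _d forms use muls64/mulu64 as no wider integer type is available.
+ */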
+
+void HELPER(gvec_umulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ((uint32_t)n[i] * m[i]) >> 8;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = ((uint32_t)n[i] * m[i]) >> 16;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = ((uint64_t)n[i] * m[i]) >> 32;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t discard;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ mulu64(&discard, &d[i], n[i], m[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_xar_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ int shr = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ror64(n[i] ^ m[i], shr);
+ }
+ clear_tail(d, opr_sz * 8, simd_maxsz(desc));
+}
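+
+/*
+ * For reference: XAR rotates the XOR of the two inputs right by shr;
+ * e.g. ror64(n ^ m, 8) moves the low byte of the XOR result to the top.
+ */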
+
+/*
+ * Integer matrix-multiply accumulate
+ */
+
+static uint32_t do_smmla_b(uint32_t sum, void *vn, void *vm)
+{
+ int8_t *n = vn, *m = vm;
+
+ for (intptr_t k = 0; k < 8; ++k) {
+ sum += n[H1(k)] * m[H1(k)];
+ }
+ return sum;
+}
+
+static uint32_t do_ummla_b(uint32_t sum, void *vn, void *vm)
+{
+ uint8_t *n = vn, *m = vm;
+
+ for (intptr_t k = 0; k < 8; ++k) {
+ sum += n[H1(k)] * m[H1(k)];
+ }
+ return sum;
+}
+
+static uint32_t do_usmmla_b(uint32_t sum, void *vn, void *vm)
+{
+ uint8_t *n = vn;
+ int8_t *m = vm;
+
+ for (intptr_t k = 0; k < 8; ++k) {
+ sum += n[H1(k)] * m[H1(k)];
+ }
+ return sum;
+}
+
+static void do_mmla_b(void *vd, void *vn, void *vm, void *va, uint32_t desc,
+ uint32_t (*inner_loop)(uint32_t, void *, void *))
+{
+ intptr_t seg, opr_sz = simd_oprsz(desc);
+
+ for (seg = 0; seg < opr_sz; seg += 16) {
+ uint32_t *d = vd + seg;
+ uint32_t *a = va + seg;
+ uint32_t sum0, sum1, sum2, sum3;
+
+ /*
+ * Process the entire segment at once, writing back the
+ * results only after we've consumed all of the inputs.
+ *
+         * Key to indices by column:
+         *               i   j                  i             j
+ */
+ sum0 = a[H4(0 + 0)];
+ sum0 = inner_loop(sum0, vn + seg + 0, vm + seg + 0);
+ sum1 = a[H4(0 + 1)];
+ sum1 = inner_loop(sum1, vn + seg + 0, vm + seg + 8);
+ sum2 = a[H4(2 + 0)];
+ sum2 = inner_loop(sum2, vn + seg + 8, vm + seg + 0);
+ sum3 = a[H4(2 + 1)];
+ sum3 = inner_loop(sum3, vn + seg + 8, vm + seg + 8);
+
+ d[H4(0)] = sum0;
+ d[H4(1)] = sum1;
+ d[H4(2)] = sum2;
+ d[H4(3)] = sum3;
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
+
+#define DO_MMLA_B(NAME, INNER) \
+ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+ { do_mmla_b(vd, vn, vm, va, desc, INNER); }
+
+DO_MMLA_B(gvec_smmla_b, do_smmla_b)
+DO_MMLA_B(gvec_ummla_b, do_ummla_b)
+DO_MMLA_B(gvec_usmmla_b, do_usmmla_b)
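+
+/*
+ * Illustrative reading of do_mmla_b: each 16-byte segment of n and m
+ * holds a 2x8 matrix of bytes (two rows of eight), and the four sums
+ * form the 2x2 int32 result d = a + n * m^T for that segment.
+ */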
diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h
index e3eb3e7a6b..dba481e001 100644
--- a/target/arm/vec_internal.h
+++ b/target/arm/vec_internal.h
@@ -20,6 +20,30 @@
#ifndef TARGET_ARM_VEC_INTERNALS_H
#define TARGET_ARM_VEC_INTERNALS_H
+/*
+ * Note that vector data is stored in host-endian 64-bit chunks,
+ * so addressing units smaller than that needs a host-endian fixup.
+ *
+ * The H<N> macros are used when indexing an array of elements of size N.
+ *
+ * The H1_<N> macros are used when performing byte arithmetic and then
+ * casting the final pointer to a type of size N.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define H1(x) ((x) ^ 7)
+#define H1_2(x) ((x) ^ 6)
+#define H1_4(x) ((x) ^ 4)
+#define H2(x) ((x) ^ 3)
+#define H4(x) ((x) ^ 1)
+#else
+#define H1(x) (x)
+#define H1_2(x) (x)
+#define H1_4(x) (x)
+#define H2(x) (x)
+#define H4(x) (x)
+#endif
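+
+/*
+ * Worked example (illustrative): on a big-endian host H2(0) == 3, since
+ * the least-significant 16-bit element of a 64-bit chunk sits at the
+ * highest halfword address; on little-endian hosts every H macro is the
+ * identity.
+ */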
+
static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
{
uint64_t *d = vd + opr_sz;
@@ -30,4 +54,147 @@ static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
}
}
+static inline int32_t do_sqrshl_bhs(int32_t src, int32_t shift, int bits,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -bits) {
+ /* Rounding the sign bit always produces 0. */
+ if (round) {
+ return 0;
+ }
+ return src >> 31;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < bits) {
+ int32_t val = src << shift;
+ if (bits == 32) {
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else {
+ int32_t extval = sextract32(val, 0, bits);
+ if (!sat || val == extval) {
+ return extval;
+ }
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return (1u << (bits - 1)) - (src >= 0);
+}
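+
+/*
+ * Worked example (illustrative): do_sqrshl_bhs(5, -1, 8, true, &sat)
+ * takes the rounding path: 5 >>= 0, then (5 >> 1) + (5 & 1) == 3,
+ * i.e. 5/2 rounded half-up.
+ */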
+
+static inline uint32_t do_uqrshl_bhs(uint32_t src, int32_t shift, int bits,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -(bits + round)) {
+ return 0;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < bits) {
+ uint32_t val = src << shift;
+ if (bits == 32) {
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else {
+ uint32_t extval = extract32(val, 0, bits);
+ if (!sat || val == extval) {
+ return extval;
+ }
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return MAKE_64BIT_MASK(0, bits);
+}
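+
+/*
+ * For reference: the -(bits + round) bound keeps the shift == -bits
+ * rounding case alive, e.g. do_uqrshl_bhs(0x80, -8, 8, true, &sat)
+ * rounds up to 1 rather than truncating to 0.
+ */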
+
+static inline int32_t do_suqrshl_bhs(int32_t src, int32_t shift, int bits,
+ bool round, uint32_t *sat)
+{
+ if (sat && src < 0) {
+ *sat = 1;
+ return 0;
+ }
+ return do_uqrshl_bhs(src, shift, bits, round, sat);
+}
+
+static inline int64_t do_sqrshl_d(int64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -64) {
+ /* Rounding the sign bit always produces 0. */
+ if (round) {
+ return 0;
+ }
+ return src >> 63;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < 64) {
+ int64_t val = src << shift;
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return src < 0 ? INT64_MIN : INT64_MAX;
+}
+
+static inline uint64_t do_uqrshl_d(uint64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -(64 + round)) {
+ return 0;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < 64) {
+ uint64_t val = src << shift;
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return UINT64_MAX;
+}
+
+static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ if (sat && src < 0) {
+ *sat = 1;
+ return 0;
+ }
+ return do_uqrshl_d(src, shift, round, sat);
+}
+
+int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool);
+int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
+int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
+int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
+
#endif /* TARGET_ARM_VEC_INTERNALS_H */