author	Richard Henderson <richard.henderson@linaro.org>	2024-05-31 11:10:10 -0700
committer	Richard Henderson <richard.henderson@linaro.org>	2024-05-31 11:10:10 -0700
commit	74abb45dac6979e7ff76172b7f0a24e869405184 (patch)
tree	5c9cd353f6a3dfbf4a24da0a45817c70cf6715dd
parent	3b2fe44bb7f605f179e5e7feb2c13c2eb3abbb80 (diff)
parent	3c3c233677d4f2fe5f35c5d6d6e9b53df48054f4 (diff)
Merge tag 'pull-target-arm-20240531' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm:
 * hw/intc/arm_gic: Fix set pending of PPIs
 * hw/intc/arm_gic: Fix writes to GICD_ITARGETSRn
 * xilinx_zynq: Add cache controller
 * xilinx_zynq: Support up to two CPU cores
 * tests/avocado: update sbsa-ref firmware
 * sbsa-ref: move to Neoverse-N2 as default
 * More decodetree conversion of A64 ASIMD insns
 * docs/system/target-arm: Re-alphabetize board list
 * Implement FEAT WFxT and enable for '-cpu max'
 * hw/usb/hcd-ohci: Fix #1510, #303: pid not IN or OUT

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmZZvHgZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3uArEACZgk0hqKtRcEzwdJi7w7ax
# ta/Iyl7AA+ngmh0qcE8QX8rzZhcGcKhsaQ8dNESMIBqVi1fS0hmNrIUWhXqmvNmZ
# 07WJvQx7Ki9YNX02frjkRZTwWozsbW8uoaXgnngFK93PNh/IoQBRP5T/LIZ5t3d7
# 7I/O/tnS/LZrL6wtP4EbRIEvZ4dfJe3X+uSCHSF8iOYrJLrZCsy/ItJqzY6Y0f96
# iUoOfXjrYH2hM9VkJGHIGy1r9nYRkCxXREQh7ahw/z6mv0nIB1YTS1eR0dH9D1yM
# afdby8iPN7k+f3en+2dHfyPjani4vPd1/k9mgLnQtVLOHrdw2APs1Q59YwYhunhe
# ZC0Fcp6jBSkcI6LHRY0bRtY0U3SBPrfkSD5sJrNH1obnsSvizeSU3uCq1QmKRCRY
# FuARmE77ywY8CURiqfwPSrC/ecSnamueIQNKNPZVQ5ve3dbokp/Gr1eJgcq80ovK
# wIKmNhJq60qBcj2zQ1aw1PP3+zvbZ/rl2j0abGbxBH3Kkp9AvALDiLRMciazVWph
# vbx7e1Y90Zrs3ap1AAUFUyWexYPNvZWmSGOaWv6Wdt+1Yf/YDW9wrwjVd3eRG9rM
# vgNMrccysBUNDpS4s0KSbqLy9AsjqAa41SiKipWFBekUyQFboNpTNfDNCspIPj9m
# dnI4fyXkVmSCYFiW2akmjg==
# =Jy5P
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 31 May 2024 05:03:04 AM PDT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg: aka "Peter Maydell <peter@archaic.org.uk>" [unknown]

* tag 'pull-target-arm-20240531' of https://git.linaro.org/people/pmaydell/qemu-arm: (43 commits)
  hw/usb/hcd-ohci: Fix #1510, #303: pid not IN or OUT
  target/arm: Implement FEAT WFxT and enable for '-cpu max'
  accel/tcg: Make TCGCPUOps::cpu_exec_halt return bool for whether to halt
  docs/system/target-arm: Re-alphabetize board list
  target/arm: Disable SVE extensions when SVE is disabled
  target/arm: Convert FCSEL to decodetree
  target/arm: Convert FMADD, FMSUB, FNMADD, FNMSUB to decodetree
  target/arm: Convert SQDMULH, SQRDMULH to decodetree
  target/arm: Tidy SQDMULH, SQRDMULH (vector)
  target/arm: Convert MLA, MLS to decodetree
  target/arm: Convert MUL, PMUL to decodetree
  target/arm: Convert SABA, SABD, UABA, UABD to decodetree
  target/arm: Convert SMAX, SMIN, UMAX, UMIN to decodetree
  target/arm: Convert SRHADD, URHADD to decodetree
  target/arm: Convert SRHADD, URHADD to gvec
  target/arm: Convert SHSUB, UHSUB to decodetree
  target/arm: Convert SHSUB, UHSUB to gvec
  target/arm: Convert SHADD, UHADD to decodetree
  target/arm: Convert SHADD, UHADD to gvec
  target/arm: Use TCG_COND_TSTNE in gen_cmtst_vec
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--	accel/tcg/cpu-exec.c	7
-rw-r--r--	docs/system/arm/emulation.rst	1
-rw-r--r--	docs/system/target-arm.rst	6
-rw-r--r--	hw/arm/Kconfig	1
-rw-r--r--	hw/arm/sbsa-ref.c	2
-rw-r--r--	hw/arm/xilinx_zynq.c	55
-rw-r--r--	hw/intc/arm_gic.c	12
-rw-r--r--	hw/usb/hcd-ohci.c	5
-rw-r--r--	hw/usb/trace-events	1
-rw-r--r--	include/hw/core/tcg-cpu-ops.h	15
-rw-r--r--	target/arm/cpu-features.h	5
-rw-r--r--	target/arm/cpu.c	40
-rw-r--r--	target/arm/cpu.h	3
-rw-r--r--	target/arm/cpu64.c	6
-rw-r--r--	target/arm/helper.c	4
-rw-r--r--	target/arm/helper.h	97
-rw-r--r--	target/arm/internals.h	8
-rw-r--r--	target/arm/machine.c	20
-rw-r--r--	target/arm/tcg/a64.decode	119
-rw-r--r--	target/arm/tcg/cpu64.c	1
-rw-r--r--	target/arm/tcg/gengvec.c	689
-rw-r--r--	target/arm/tcg/gengvec64.c	181
-rw-r--r--	target/arm/tcg/neon-dp.decode	37
-rw-r--r--	target/arm/tcg/neon_helper.c	506
-rw-r--r--	target/arm/tcg/op_helper.c	54
-rw-r--r--	target/arm/tcg/translate-a64.c	1362
-rw-r--r--	target/arm/tcg/translate-a64.h	14
-rw-r--r--	target/arm/tcg/translate-neon.c	118
-rw-r--r--	target/arm/tcg/translate.h	44
-rw-r--r--	target/arm/tcg/vec_helper.c	128
-rw-r--r--	target/i386/tcg/helper-tcg.h	2
-rw-r--r--	target/i386/tcg/sysemu/seg_helper.c	3
-rw-r--r--	tests/avocado/machine_aarch64_sbsaref.py	20
33 files changed, 2034 insertions, 1532 deletions
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 2972f75b96..6711b58e0b 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -682,11 +682,14 @@ static inline bool cpu_handle_halt(CPUState *cpu)
#ifndef CONFIG_USER_ONLY
if (cpu->halted) {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ bool leave_halt;
if (tcg_ops->cpu_exec_halt) {
- tcg_ops->cpu_exec_halt(cpu);
+ leave_halt = tcg_ops->cpu_exec_halt(cpu);
+ } else {
+ leave_halt = cpu_has_work(cpu);
}
- if (!cpu_has_work(cpu)) {
+ if (!leave_halt) {
return true;
}
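
The hook's new contract: a target can now do halt-time bookkeeping and decide for itself whether the CPU leaves the halted state, instead of the core loop unconditionally consulting cpu_has_work(). A minimal sketch of a hook under this contract, for a hypothetical target "foo" (foo_update_wakeup_sources is an invented stand-in, not a real QEMU API):

    static bool foo_cpu_exec_halt(CPUState *cs)
    {
        /* Target-specific processing while halted (hypothetical helper). */
        foo_update_wakeup_sources(cs);

        /* Report whether the CPU should now leave the halted state. */
        return cpu_has_work(cs);
    }
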
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 7fcea54d8d..1a06a5feb6 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -146,6 +146,7 @@ the following architecture extensions:
- FEAT_UAO (Unprivileged Access Override control)
- FEAT_VHE (Virtualization Host Extensions)
- FEAT_VMID16 (16-bit VMID)
+- FEAT_WFxT (WFE and WFI instructions with timeout)
- FEAT_XNX (Translation table stage 2 Unprivileged Execute-never)
For information on the specifics of these extensions, please refer
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index c9d7c0dda7..870d30e350 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -86,16 +86,16 @@ undocumented; you can get a complete list by running
arm/bananapi_m2u.rst
arm/b-l475e-iot01a.rst
arm/sabrelite
+ arm/highbank
arm/digic
arm/cubieboard
arm/emcraft-sf2
- arm/highbank
arm/musicpal
arm/gumstix
arm/mainstone
arm/kzm
- arm/nrf
arm/nseries
+ arm/nrf
arm/nuvoton
arm/imx25-pdk
arm/orangepi
@@ -107,8 +107,8 @@ undocumented; you can get a complete list by running
arm/stellaris
arm/stm32
arm/virt
- arm/xlnx-versal-virt
arm/xenpvh
+ arm/xlnx-versal-virt
Emulated CPU architecture support
=================================
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index 8b97683a45..1ad60da7aa 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -370,6 +370,7 @@ config ZYNQ
select A9MPCORE
select CADENCE # UART
select PFLASH_CFI02
+ select PL310 # cache controller
select PL330
select SDHCI
select SSI_M25P80
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index 57c337fd92..e884692f07 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -891,7 +891,7 @@ static void sbsa_ref_class_init(ObjectClass *oc, void *data)
mc->init = sbsa_ref_init;
mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine";
- mc->default_cpu_type = ARM_CPU_TYPE_NAME("neoverse-n1");
+ mc->default_cpu_type = ARM_CPU_TYPE_NAME("neoverse-n2");
mc->valid_cpu_types = valid_cpu_types;
mc->max_cpus = 512;
mc->pci_allow_0_address = true;
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index fc3abcbe88..7f7a3d23fb 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -84,9 +84,12 @@ static const int dma_irqs[8] = {
0xe3401000 + ARMV7_IMM16(extract32((val), 16, 16)), /* movt r1 ... */ \
0xe5801000 + (addr)
+#define ZYNQ_MAX_CPUS 2
+
struct ZynqMachineState {
MachineState parent;
Clock *ps_clk;
+ ARMCPU *cpu[ZYNQ_MAX_CPUS];
};
static void zynq_write_board_setup(ARMCPU *cpu,
@@ -176,13 +179,13 @@ static inline int zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq,
static void zynq_init(MachineState *machine)
{
ZynqMachineState *zynq_machine = ZYNQ_MACHINE(machine);
- ARMCPU *cpu;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *ocm_ram = g_new(MemoryRegion, 1);
DeviceState *dev, *slcr;
SysBusDevice *busdev;
qemu_irq pic[64];
int n;
+ unsigned int smp_cpus = machine->smp.cpus;
/* max 2GB ram */
if (machine->ram_size > 2 * GiB) {
@@ -190,21 +193,26 @@ static void zynq_init(MachineState *machine)
exit(EXIT_FAILURE);
}
- cpu = ARM_CPU(object_new(machine->cpu_type));
+ for (n = 0; n < smp_cpus; n++) {
+ Object *cpuobj = object_new(machine->cpu_type);
- /* By default A9 CPUs have EL3 enabled. This board does not
- * currently support EL3 so the CPU EL3 property is disabled before
- * realization.
- */
- if (object_property_find(OBJECT(cpu), "has_el3")) {
- object_property_set_bool(OBJECT(cpu), "has_el3", false, &error_fatal);
- }
+ /*
+ * By default A9 CPUs have EL3 enabled. This board does not currently
+ * support EL3 so the CPU EL3 property is disabled before realization.
+ */
+ if (object_property_find(cpuobj, "has_el3")) {
+ object_property_set_bool(cpuobj, "has_el3", false, &error_fatal);
+ }
- object_property_set_int(OBJECT(cpu), "midr", ZYNQ_BOARD_MIDR,
- &error_fatal);
- object_property_set_int(OBJECT(cpu), "reset-cbar", MPCORE_PERIPHBASE,
- &error_fatal);
- qdev_realize(DEVICE(cpu), NULL, &error_fatal);
+ object_property_set_int(cpuobj, "midr", ZYNQ_BOARD_MIDR,
+ &error_fatal);
+ object_property_set_int(cpuobj, "reset-cbar", MPCORE_PERIPHBASE,
+ &error_fatal);
+
+ qdev_realize(DEVICE(cpuobj), NULL, &error_fatal);
+
+ zynq_machine->cpu[n] = ARM_CPU(cpuobj);
+ }
/* DDR remapped to address zero. */
memory_region_add_subregion(address_space_mem, 0, machine->ram);
@@ -237,14 +245,19 @@ static void zynq_init(MachineState *machine)
sysbus_mmio_map(SYS_BUS_DEVICE(slcr), 0, 0xF8000000);
dev = qdev_new(TYPE_A9MPCORE_PRIV);
- qdev_prop_set_uint32(dev, "num-cpu", 1);
+ qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
busdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(busdev, &error_fatal);
sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE);
- sysbus_connect_irq(busdev, 0,
- qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
- sysbus_connect_irq(busdev, 1,
- qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ));
+ zynq_binfo.gic_cpu_if_addr = MPCORE_PERIPHBASE + 0x100;
+ sysbus_create_varargs("l2x0", MPCORE_PERIPHBASE + 0x2000, NULL);
+ for (n = 0; n < smp_cpus; n++) {
+ DeviceState *cpudev = DEVICE(zynq_machine->cpu[n]);
+ sysbus_connect_irq(busdev, (2 * n) + 0,
+ qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
+ sysbus_connect_irq(busdev, (2 * n) + 1,
+ qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+ }
for (n = 0; n < 64; n++) {
pic[n] = qdev_get_gpio_in(dev, n);
@@ -349,7 +362,7 @@ static void zynq_init(MachineState *machine)
zynq_binfo.board_setup_addr = BOARD_SETUP_ADDR;
zynq_binfo.write_board_setup = zynq_write_board_setup;
- arm_load_kernel(cpu, machine, &zynq_binfo);
+ arm_load_kernel(zynq_machine->cpu[0], machine, &zynq_binfo);
}
static void zynq_machine_class_init(ObjectClass *oc, void *data)
@@ -361,7 +374,7 @@ static void zynq_machine_class_init(ObjectClass *oc, void *data)
MachineClass *mc = MACHINE_CLASS(oc);
mc->desc = "Xilinx Zynq Platform Baseboard for Cortex-A9";
mc->init = zynq_init;
- mc->max_cpus = 1;
+ mc->max_cpus = ZYNQ_MAX_CPUS;
mc->no_sdcard = 1;
mc->ignore_memory_transaction_failures = true;
mc->valid_cpu_types = valid_cpu_types;
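
With the board now instantiating up to ZYNQ_MAX_CPUS cores and wiring each one's IRQ and FIQ lines to the A9MPCore, the second core can be enabled from the command line (a usage sketch; the kernel image stands in for whatever guest you normally boot):

    qemu-system-arm -M xilinx-zynq-a9 -smp 2 -kernel zImage ...

The default remains a single CPU, so existing command lines are unaffected.
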
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index e4b8437f8b..806832439b 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -1308,12 +1308,15 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (value & (1 << i)) {
+ int mask = (irq < GIC_INTERNAL) ? (1 << cpu)
+ : GIC_DIST_TARGET(irq + i);
+
if (s->security_extn && !attrs.secure &&
!GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i));
+ GIC_DIST_SET_PENDING(irq + i, mask);
}
}
} else if (offset < 0x300) {
@@ -1407,6 +1410,13 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
value = ALL_CPU_MASK;
}
s->irq_target[irq] = value & ALL_CPU_MASK;
+ if (irq >= GIC_INTERNAL && s->irq_state[irq].pending) {
+ /*
+ * Changing the target of an interrupt that is currently
+ * pending updates the set of CPUs it is pending on.
+ */
+ s->irq_state[irq].pending = value & ALL_CPU_MASK;
+ }
}
} else if (offset < 0xf00) {
/* Interrupt Configuration. */
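
Two behaviours change in this file. First, a GICD_ISPENDRn write that sets a PPI (irq < GIC_INTERNAL) now marks the interrupt pending only on the CPU performing the write: PPIs are banked per CPU and have no target register, so the old GIC_DIST_TARGET() lookup was meaningless for them. Second, retargeting an already-pending SPI via GICD_ITARGETSRn now moves its pending state to the new target set. An illustrative walk-through of the first fix (values chosen for the example, not taken from the patch):

    /* CPU 1 writes GICD_ISPENDR0 with bit 29 set (PPI 29). */
    int cpu = 1;
    int irq = 29;   /* irq < GIC_INTERNAL (32), i.e. an SGI/PPI */
    int mask = (irq < GIC_INTERNAL) ? (1 << cpu)             /* banked: this CPU only */
                                    : GIC_DIST_TARGET(irq);  /* SPIs keep target mask */
    /* mask == 0x2: PPI 29 becomes pending on CPU 1 alone. */
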
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index fc8fc91a1d..acd6016980 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -927,6 +927,11 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
case OHCI_TD_DIR_SETUP:
str = "setup";
pid = USB_TOKEN_SETUP;
+ if (OHCI_BM(ed->flags, ED_EN) > 0) { /* setup only allowed to ep 0 */
+ trace_usb_ohci_td_bad_pid(str, ed->flags, td.flags);
+ ohci_die(ohci);
+ return 1;
+ }
break;
default:
trace_usb_ohci_td_bad_direction(dir);
diff --git a/hw/usb/trace-events b/hw/usb/trace-events
index ed7dc210d3..fd7b90d70c 100644
--- a/hw/usb/trace-events
+++ b/hw/usb/trace-events
@@ -28,6 +28,7 @@ usb_ohci_iso_td_data_overrun(int ret, ssize_t len) "DataOverrun %d > %zu"
usb_ohci_iso_td_data_underrun(int ret) "DataUnderrun %d"
usb_ohci_iso_td_nak(int ret) "got NAK/STALL %d"
usb_ohci_iso_td_bad_response(int ret) "Bad device response %d"
+usb_ohci_td_bad_pid(const char *s, uint32_t edf, uint32_t tdf) "Bad pid %s: ed.flags 0x%x td.flags 0x%x"
usb_ohci_port_attach(int index) "port #%d"
usb_ohci_port_detach(int index) "port #%d"
usb_ohci_port_wakeup(int index) "port #%d"
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 9387d38748..099de3375e 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -115,8 +115,19 @@ struct TCGCPUOps {
void (*do_interrupt)(CPUState *cpu);
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
- /** @cpu_exec_halt: Callback for handling halt in cpu_exec */
- void (*cpu_exec_halt)(CPUState *cpu);
+ /**
+ * @cpu_exec_halt: Callback for handling halt in cpu_exec.
+ *
+ * The target CPU should do any special processing here that it needs
+ * to do when the CPU is in the halted state.
+ *
+ * Return true to indicate that the CPU should now leave halt, false
+ * if it should remain in the halted state.
+ *
+ * If this method is not provided, the default is to do nothing, and
+ * to leave halt if cpu_has_work() returns true.
+ */
+ bool (*cpu_exec_halt)(CPUState *cpu);
/**
* @tlb_fill: Handle a softmmu tlb miss
*
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index b300d0446d..c59ca104fe 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -571,6 +571,11 @@ static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
}
+static inline bool isar_feature_aa64_wfxt(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, WFXT) >= 2;
+}
+
static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 77f8c9c748..35fa281f1b 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1132,6 +1132,35 @@ static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
return arm_cpu_data_is_big_endian(env);
}
+#ifdef CONFIG_TCG
+static bool arm_cpu_exec_halt(CPUState *cs)
+{
+ bool leave_halt = cpu_has_work(cs);
+
+ if (leave_halt) {
+ /* We're about to come out of WFI/WFE: disable the WFxT timer */
+ ARMCPU *cpu = ARM_CPU(cs);
+ if (cpu->wfxt_timer) {
+ timer_del(cpu->wfxt_timer);
+ }
+ }
+ return leave_halt;
+}
+#endif
+
+static void arm_wfxt_timer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+
+ /*
+ * We expect the CPU to be halted; this will cause arm_cpu_has_work()
+ * to return true (so we will come out of halt even with no other
+ * pending interrupt), and the TCG accelerator's cpu_exec_interrupt()
+ * function auto-clears the CPU_INTERRUPT_EXITTB flag for us.
+ */
+ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
+}
#endif
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
@@ -1877,6 +1906,9 @@ static void arm_cpu_finalizefn(Object *obj)
if (cpu->pmu_timer) {
timer_free(cpu->pmu_timer);
}
+ if (cpu->wfxt_timer) {
+ timer_free(cpu->wfxt_timer);
+ }
#endif
}
@@ -2369,6 +2401,13 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
#endif
}
+#ifndef CONFIG_USER_ONLY
+ if (tcg_enabled() && cpu_isar_feature(aa64_wfxt, cpu)) {
+ cpu->wfxt_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ arm_wfxt_timer_cb, cpu);
+ }
+#endif
+
if (tcg_enabled()) {
/*
* Don't report some architectural features in the ID registers
@@ -2625,6 +2664,7 @@ static const TCGCPUOps arm_tcg_ops = {
#else
.tlb_fill = arm_cpu_tlb_fill,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
+ .cpu_exec_halt = arm_cpu_exec_halt,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index c17264c239..3841359d0f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -866,6 +866,9 @@ struct ArchCPU {
* pmu_op_finish() - it does not need other handling during migration
*/
QEMUTimer *pmu_timer;
+ /* Timer used for WFxT timeouts */
+ QEMUTimer *wfxt_timer;
+
/* GPIO outputs for generic timer */
qemu_irq gt_timer_outputs[NUM_GTIMERS];
/* GPIO output for GICv3 maintenance interrupt signal */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index c15d086049..862d2b92fa 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -109,7 +109,11 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* No explicit bits enabled, and no implicit bits from sve-max-vq.
*/
if (!cpu_isar_feature(aa64_sve, cpu)) {
- /* SVE is disabled and so are all vector lengths. Good. */
+ /*
+ * SVE is disabled and so are all vector lengths. Good.
+ * Disable all SVE extensions as well.
+ */
+ cpu->isar.id_aa64zfr0 = 0;
return;
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7587635960..ce31957235 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -2665,7 +2665,7 @@ static CPAccessResult gt_stimer_access(CPUARMState *env,
}
}
-static uint64_t gt_get_countervalue(CPUARMState *env)
+uint64_t gt_get_countervalue(CPUARMState *env)
{
ARMCPU *cpu = env_archcpu(env);
@@ -2800,7 +2800,7 @@ static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
}
-static uint64_t gt_virt_cnt_offset(CPUARMState *env)
+uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
uint64_t hcr;
diff --git a/target/arm/helper.h b/target/arm/helper.h
index f830531dd3..eca2043fc2 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -53,6 +53,7 @@ DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
DEF_HELPER_1(setend, void, env)
DEF_HELPER_2(wfi, void, env, i32)
DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_2(wfit, void, env, i64)
DEF_HELPER_1(yield, void, env)
DEF_HELPER_1(pre_hvc, void, env)
DEF_HELPER_2(pre_smc, void, env, i32)
@@ -268,50 +269,6 @@ DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr)
DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
/* neon_helper.c */
-DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_uqadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64)
-DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64)
-DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64)
-
-DEF_HELPER_2(neon_hadd_s8, i32, i32, i32)
-DEF_HELPER_2(neon_hadd_u8, i32, i32, i32)
-DEF_HELPER_2(neon_hadd_s16, i32, i32, i32)
-DEF_HELPER_2(neon_hadd_u16, i32, i32, i32)
-DEF_HELPER_2(neon_hadd_s32, s32, s32, s32)
-DEF_HELPER_2(neon_hadd_u32, i32, i32, i32)
-DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32)
-DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32)
-DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32)
-DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32)
-DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32)
-DEF_HELPER_2(neon_hsub_s8, i32, i32, i32)
-DEF_HELPER_2(neon_hsub_u8, i32, i32, i32)
-DEF_HELPER_2(neon_hsub_s16, i32, i32, i32)
-DEF_HELPER_2(neon_hsub_u16, i32, i32, i32)
-DEF_HELPER_2(neon_hsub_s32, s32, s32, s32)
-DEF_HELPER_2(neon_hsub_u32, i32, i32, i32)
-
DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
@@ -351,6 +308,32 @@ DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_2(neon_add_u8, i32, i32, i32)
DEF_HELPER_2(neon_add_u16, i32, i32, i32)
@@ -836,6 +819,22 @@ DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
@@ -970,6 +969,16 @@ DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
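
The block of per-element neon_* helpers removed above is replaced by whole-vector helpers: a DEF_HELPER_FLAGS_4(gvec_*, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) declaration corresponds to one out-of-line function that loops over the operand. A minimal sketch of that shape, assuming a per-lane do_srshl8() stand-in (the real loop lives in vec_helper.c, whose hunk is not shown here):

    void HELPER(gvec_srshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
    {
        intptr_t i, opr_sz = simd_oprsz(desc);   /* bytes of vector data */
        int8_t *d = vd, *n = vn, *m = vm;

        for (i = 0; i < opr_sz; i++) {
            d[i] = do_srshl8(n[i], m[i]);        /* stand-in rounding shift */
        }
        clear_tail(d, opr_sz, simd_maxsz(desc)); /* zero unused high bytes */
    }
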
diff --git a/target/arm/internals.h b/target/arm/internals.h
index ee3ebd383e..11b5da2562 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1770,4 +1770,12 @@ bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
+
+/* Return the current value of the system counter in ticks */
+uint64_t gt_get_countervalue(CPUARMState *env);
+/*
+ * Return the currently applicable offset between the system counter
+ * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
+ */
+uint64_t gt_virt_cnt_offset(CPUARMState *env);
#endif
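
These two accessors become non-static so that WFIT can translate its deadline, which is expressed against CNTVCT_EL0, into a QEMU_CLOCK_VIRTUAL expiry for cpu->wfxt_timer. A minimal sketch of that use, assuming a helper along these lines (the real helper_wfit() lives in op_helper.c, whose hunk is not shown here, and additionally handles traps and tick-to-ns overflow):

    void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
    {
        ARMCPU *cpu = env_archcpu(env);
        /* CNTVCT_EL0 is the system counter minus the virtual offset. */
        uint64_t cntvct = gt_get_countervalue(env) - gt_virt_cnt_offset(env);

        if (cntvct < timeout) {
            /* Convert remaining ticks to ns and arm the wakeup timer. */
            int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                muldiv64(timeout - cntvct, NANOSECONDS_PER_SECOND,
                         cpu->gt_cntfrq_hz);
            timer_mod(cpu->wfxt_timer, ns);
        }
        /* ... then halt exactly as WFI does (omitted). */
    }
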
diff --git a/target/arm/machine.c b/target/arm/machine.c
index b2b39b2475..0a722ca7e7 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -242,6 +242,25 @@ static const VMStateDescription vmstate_irq_line_state = {
}
};
+static bool wfxt_timer_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ /* We'll only have the timer object if FEAT_WFxT is implemented */
+ return cpu->wfxt_timer;
+}
+
+static const VMStateDescription vmstate_wfxt_timer = {
+ .name = "cpu/wfxt-timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = wfxt_timer_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_TIMER_PTR(wfxt_timer, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool m_needed(void *opaque)
{
ARMCPU *cpu = opaque;
@@ -957,6 +976,7 @@ const VMStateDescription vmstate_arm_cpu = {
#endif
&vmstate_serror,
&vmstate_irq_line_state,
+ &vmstate_wfxt_timer,
NULL
}
};
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index f48adef5bb..2b7a3254a0 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -32,6 +32,7 @@
&rr_e rd rn esz
&rrr_e rd rn rm esz
&rrx_e rd rn rm idx esz
+&rrrr_e rd rn rm ra esz
&qrr_e q rd rn esz
&qrrr_e q rd rn rm esz
&qrrx_e q rd rn rm idx esz
@@ -42,8 +43,11 @@
@rr_sd ........ ... ..... ...... rn:5 rd:5 &rr_e esz=%esz_sd
@rrr_h ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=1
+@rrr_d ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=3
@rrr_sd ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=%esz_sd
@rrr_hsd ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=%esz_hsd
+@rrr_e ........ esz:2 . rm:5 ...... rn:5 rd:5 &rrr_e
+@r2r_e ........ esz:2 . ..... ...... rm:5 rd:5 &rrr_e rn=%rd
@rrx_h ........ .. .. rm:4 .... . . rn:5 rd:5 &rrx_e esz=1 idx=%hlm
@rrx_s ........ .. . rm:5 .... . . rn:5 rd:5 &rrx_e esz=2 idx=%hl
@@ -59,6 +63,7 @@
@qrrr_h . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=1
@qrrr_sd . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=%esz_sd
@qrrr_e . q:1 ...... esz:2 . rm:5 ...... rn:5 rd:5 &qrrr_e
+@qr2r_e . q:1 ...... esz:2 . ..... ...... rm:5 rd:5 &qrrr_e rn=%rd
@qrrx_h . q:1 .. .... .. .. rm:4 .... . . rn:5 rd:5 \
&qrrx_e esz=1 idx=%hlm
@@ -225,6 +230,10 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
NOP 1101 0101 0000 0011 0010 ---- --- 11111
}
+# System instructions with register argument
+WFET 1101 0101 0000 0011 0001 0000 000 rd:5
+WFIT 1101 0101 0000 0011 0001 0000 001 rd:5
+
# Barriers
CLREX 1101 0101 0000 0011 0011 ---- 010 11111
@@ -744,6 +753,35 @@ FRECPS_s 0101 1110 0.1 ..... 11111 1 ..... ..... @rrr_sd
FRSQRTS_s 0101 1110 110 ..... 00111 1 ..... ..... @rrr_h
FRSQRTS_s 0101 1110 1.1 ..... 11111 1 ..... ..... @rrr_sd
+SQADD_s 0101 1110 ..1 ..... 00001 1 ..... ..... @rrr_e
+UQADD_s 0111 1110 ..1 ..... 00001 1 ..... ..... @rrr_e
+SQSUB_s 0101 1110 ..1 ..... 00101 1 ..... ..... @rrr_e
+UQSUB_s 0111 1110 ..1 ..... 00101 1 ..... ..... @rrr_e
+
+SUQADD_s 0101 1110 ..1 00000 00111 0 ..... ..... @r2r_e
+USQADD_s 0111 1110 ..1 00000 00111 0 ..... ..... @r2r_e
+
+SSHL_s 0101 1110 111 ..... 01000 1 ..... ..... @rrr_d
+USHL_s 0111 1110 111 ..... 01000 1 ..... ..... @rrr_d
+SRSHL_s 0101 1110 111 ..... 01010 1 ..... ..... @rrr_d
+URSHL_s 0111 1110 111 ..... 01010 1 ..... ..... @rrr_d
+SQSHL_s 0101 1110 ..1 ..... 01001 1 ..... ..... @rrr_e
+UQSHL_s 0111 1110 ..1 ..... 01001 1 ..... ..... @rrr_e
+SQRSHL_s 0101 1110 ..1 ..... 01011 1 ..... ..... @rrr_e
+UQRSHL_s 0111 1110 ..1 ..... 01011 1 ..... ..... @rrr_e
+
+ADD_s 0101 1110 111 ..... 10000 1 ..... ..... @rrr_d
+SUB_s 0111 1110 111 ..... 10000 1 ..... ..... @rrr_d
+CMGT_s 0101 1110 111 ..... 00110 1 ..... ..... @rrr_d
+CMHI_s 0111 1110 111 ..... 00110 1 ..... ..... @rrr_d
+CMGE_s 0101 1110 111 ..... 00111 1 ..... ..... @rrr_d
+CMHS_s 0111 1110 111 ..... 00111 1 ..... ..... @rrr_d
+CMTST_s 0101 1110 111 ..... 10001 1 ..... ..... @rrr_d
+CMEQ_s 0111 1110 111 ..... 10001 1 ..... ..... @rrr_d
+
+SQDMULH_s 0101 1110 ..1 ..... 10110 1 ..... ..... @rrr_e
+SQRDMULH_s 0111 1110 ..1 ..... 10110 1 ..... ..... @rrr_e
+
### Advanced SIMD scalar pairwise
FADDP_s 0101 1110 0011 0000 1101 10 ..... ..... @rr_h
@@ -857,6 +895,53 @@ BSL_v 0.10 1110 011 ..... 00011 1 ..... ..... @qrrr_b
BIT_v 0.10 1110 101 ..... 00011 1 ..... ..... @qrrr_b
BIF_v 0.10 1110 111 ..... 00011 1 ..... ..... @qrrr_b
+SQADD_v 0.00 1110 ..1 ..... 00001 1 ..... ..... @qrrr_e
+UQADD_v 0.10 1110 ..1 ..... 00001 1 ..... ..... @qrrr_e
+SQSUB_v 0.00 1110 ..1 ..... 00101 1 ..... ..... @qrrr_e
+UQSUB_v 0.10 1110 ..1 ..... 00101 1 ..... ..... @qrrr_e
+
+SUQADD_v 0.00 1110 ..1 00000 00111 0 ..... ..... @qr2r_e
+USQADD_v 0.10 1110 ..1 00000 00111 0 ..... ..... @qr2r_e
+
+SSHL_v 0.00 1110 ..1 ..... 01000 1 ..... ..... @qrrr_e
+USHL_v 0.10 1110 ..1 ..... 01000 1 ..... ..... @qrrr_e
+SRSHL_v 0.00 1110 ..1 ..... 01010 1 ..... ..... @qrrr_e
+URSHL_v 0.10 1110 ..1 ..... 01010 1 ..... ..... @qrrr_e
+SQSHL_v 0.00 1110 ..1 ..... 01001 1 ..... ..... @qrrr_e
+UQSHL_v 0.10 1110 ..1 ..... 01001 1 ..... ..... @qrrr_e
+SQRSHL_v 0.00 1110 ..1 ..... 01011 1 ..... ..... @qrrr_e
+UQRSHL_v 0.10 1110 ..1 ..... 01011 1 ..... ..... @qrrr_e
+
+ADD_v 0.00 1110 ..1 ..... 10000 1 ..... ..... @qrrr_e
+SUB_v 0.10 1110 ..1 ..... 10000 1 ..... ..... @qrrr_e
+CMGT_v 0.00 1110 ..1 ..... 00110 1 ..... ..... @qrrr_e
+CMHI_v 0.10 1110 ..1 ..... 00110 1 ..... ..... @qrrr_e
+CMGE_v 0.00 1110 ..1 ..... 00111 1 ..... ..... @qrrr_e
+CMHS_v 0.10 1110 ..1 ..... 00111 1 ..... ..... @qrrr_e
+CMTST_v 0.00 1110 ..1 ..... 10001 1 ..... ..... @qrrr_e
+CMEQ_v 0.10 1110 ..1 ..... 10001 1 ..... ..... @qrrr_e
+SHADD_v 0.00 1110 ..1 ..... 00000 1 ..... ..... @qrrr_e
+UHADD_v 0.10 1110 ..1 ..... 00000 1 ..... ..... @qrrr_e
+SHSUB_v 0.00 1110 ..1 ..... 00100 1 ..... ..... @qrrr_e
+UHSUB_v 0.10 1110 ..1 ..... 00100 1 ..... ..... @qrrr_e
+SRHADD_v 0.00 1110 ..1 ..... 00010 1 ..... ..... @qrrr_e
+URHADD_v 0.10 1110 ..1 ..... 00010 1 ..... ..... @qrrr_e
+SMAX_v 0.00 1110 ..1 ..... 01100 1 ..... ..... @qrrr_e
+UMAX_v 0.10 1110 ..1 ..... 01100 1 ..... ..... @qrrr_e
+SMIN_v 0.00 1110 ..1 ..... 01101 1 ..... ..... @qrrr_e
+UMIN_v 0.10 1110 ..1 ..... 01101 1 ..... ..... @qrrr_e
+SABD_v 0.00 1110 ..1 ..... 01110 1 ..... ..... @qrrr_e
+UABD_v 0.10 1110 ..1 ..... 01110 1 ..... ..... @qrrr_e
+SABA_v 0.00 1110 ..1 ..... 01111 1 ..... ..... @qrrr_e
+UABA_v 0.10 1110 ..1 ..... 01111 1 ..... ..... @qrrr_e
+MUL_v 0.00 1110 ..1 ..... 10011 1 ..... ..... @qrrr_e
+PMUL_v 0.10 1110 001 ..... 10011 1 ..... ..... @qrrr_b
+MLA_v 0.00 1110 ..1 ..... 10010 1 ..... ..... @qrrr_e
+MLS_v 0.10 1110 ..1 ..... 10010 1 ..... ..... @qrrr_e
+
+SQDMULH_v 0.00 1110 ..1 ..... 10110 1 ..... ..... @qrrr_e
+SQRDMULH_v 0.10 1110 ..1 ..... 10110 1 ..... ..... @qrrr_e
+
### Advanced SIMD scalar x indexed element
FMUL_si 0101 1111 00 .. .... 1001 . 0 ..... ..... @rrx_h
@@ -875,6 +960,12 @@ FMULX_si 0111 1111 00 .. .... 1001 . 0 ..... ..... @rrx_h
FMULX_si 0111 1111 10 . ..... 1001 . 0 ..... ..... @rrx_s
FMULX_si 0111 1111 11 0 ..... 1001 . 0 ..... ..... @rrx_d
+SQDMULH_si 0101 1111 01 .. .... 1100 . 0 ..... ..... @rrx_h
+SQDMULH_si 0101 1111 10 .. .... 1100 . 0 ..... ..... @rrx_s
+
+SQRDMULH_si 0101 1111 01 .. .... 1101 . 0 ..... ..... @rrx_h
+SQRDMULH_si 0101 1111 10 . ..... 1101 . 0 ..... ..... @rrx_s
+
### Advanced SIMD vector x indexed element
FMUL_vi 0.00 1111 00 .. .... 1001 . 0 ..... ..... @qrrx_h
@@ -897,3 +988,31 @@ FMLAL_vi 0.00 1111 10 .. .... 0000 . 0 ..... ..... @qrrx_h
FMLSL_vi 0.00 1111 10 .. .... 0100 . 0 ..... ..... @qrrx_h
FMLAL2_vi 0.10 1111 10 .. .... 1000 . 0 ..... ..... @qrrx_h
FMLSL2_vi 0.10 1111 10 .. .... 1100 . 0 ..... ..... @qrrx_h
+
+MUL_vi 0.00 1111 01 .. .... 1000 . 0 ..... ..... @qrrx_h
+MUL_vi 0.00 1111 10 . ..... 1000 . 0 ..... ..... @qrrx_s
+
+MLA_vi 0.10 1111 01 .. .... 0000 . 0 ..... ..... @qrrx_h
+MLA_vi 0.10 1111 10 . ..... 0000 . 0 ..... ..... @qrrx_s
+
+MLS_vi 0.10 1111 01 .. .... 0100 . 0 ..... ..... @qrrx_h
+MLS_vi 0.10 1111 10 . ..... 0100 . 0 ..... ..... @qrrx_s
+
+SQDMULH_vi 0.00 1111 01 .. .... 1100 . 0 ..... ..... @qrrx_h
+SQDMULH_vi 0.00 1111 10 . ..... 1100 . 0 ..... ..... @qrrx_s
+
+SQRDMULH_vi 0.00 1111 01 .. .... 1101 . 0 ..... ..... @qrrx_h
+SQRDMULH_vi 0.00 1111 10 . ..... 1101 . 0 ..... ..... @qrrx_s
+
+# Floating-point conditional select
+
+FCSEL 0001 1110 .. 1 rm:5 cond:4 11 rn:5 rd:5 esz=%esz_hsd
+
+# Floating-point data-processing (3 source)
+
+@rrrr_hsd .... .... .. . rm:5 . ra:5 rn:5 rd:5 &rrrr_e esz=%esz_hsd
+
+FMADD 0001 1111 .. 0 ..... 0 ..... ..... ..... @rrrr_hsd
+FMSUB 0001 1111 .. 0 ..... 1 ..... ..... ..... @rrrr_hsd
+FNMADD 0001 1111 .. 1 ..... 0 ..... ..... ..... @rrrr_hsd
+FNMSUB 0001 1111 .. 1 ..... 1 ..... ..... ..... @rrrr_hsd
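
Each pattern added in this file is matched by decodetree, which extracts the named fields and calls a trans_<INSN> function in the translator. For the WFIT pattern above, for example, the generated decoder expects a function of this shape (a sketch; the exact translate-a64.c body differs):

    static bool trans_WFIT(DisasContext *s, arg_WFIT *a)
    {
        /* a->rd holds the Xd register number decoded from bits [4:0]. */
        if (!dc_isar_feature(aa64_wfxt, s)) {
            return false;   /* pattern matched but feature not present */
        }
        /* ... emit the wfit helper call with the Xd timeout ... */
        return true;
    }
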
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index da41a44f75..0899251eef 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -1168,6 +1168,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = cpu->isar.id_aa64isar2;
t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */
t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */
+ t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */
cpu->isar.id_aa64isar2 = t;
t = cpu->isar.id_aa64pfr0;
diff --git a/target/arm/tcg/gengvec.c b/target/arm/tcg/gengvec.c
index 22c9d17dce..56a1dc1f75 100644
--- a/target/arm/tcg/gengvec.c
+++ b/target/arm/tcg/gengvec.c
@@ -29,11 +29,32 @@ static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
{
TCGv_ptr qc_ptr = tcg_temp_new_ptr();
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
opr_sz, max_sz, 0, fn);
}
+void gen_gvec_sqdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[2] = {
+ gen_helper_neon_sqdmulh_h, gen_helper_neon_sqdmulh_s
+ };
+ tcg_debug_assert(vece >= 1 && vece <= 2);
+ gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
+void gen_gvec_sqrdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[2] = {
+ gen_helper_neon_sqrdmulh_h, gen_helper_neon_sqrdmulh_s
+ };
+ tcg_debug_assert(vece >= 1 && vece <= 2);
+ gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
@@ -933,21 +954,17 @@ void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
- tcg_gen_and_i32(d, a, b);
- tcg_gen_negsetcond_i32(TCG_COND_NE, d, d, tcg_constant_i32(0));
+ tcg_gen_negsetcond_i32(TCG_COND_TSTNE, d, a, b);
}
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
- tcg_gen_and_i64(d, a, b);
- tcg_gen_negsetcond_i64(TCG_COND_NE, d, d, tcg_constant_i64(0));
+ tcg_gen_negsetcond_i64(TCG_COND_TSTNE, d, a, b);
}
static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
- tcg_gen_and_vec(vece, d, a, b);
- tcg_gen_dupi_vec(vece, a, 0);
- tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
+ tcg_gen_cmp_vec(TCG_COND_TSTNE, vece, d, a, b);
}
void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
@@ -1217,21 +1234,113 @@ void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
-static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_gvec_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3 * const fns[] = {
+ gen_helper_gvec_srshl_b, gen_helper_gvec_srshl_h,
+ gen_helper_gvec_srshl_s, gen_helper_gvec_srshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_gvec_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3 * const fns[] = {
+ gen_helper_gvec_urshl_b, gen_helper_gvec_urshl_h,
+ gen_helper_gvec_urshl_s, gen_helper_gvec_urshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_sqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[] = {
+ gen_helper_neon_sqshl_b, gen_helper_neon_sqshl_h,
+ gen_helper_neon_sqshl_s, gen_helper_neon_sqshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+ opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_uqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[] = {
+ gen_helper_neon_uqshl_b, gen_helper_neon_uqshl_h,
+ gen_helper_neon_uqshl_s, gen_helper_neon_uqshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+ opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[] = {
+ gen_helper_neon_sqrshl_b, gen_helper_neon_sqrshl_h,
+ gen_helper_neon_sqrshl_s, gen_helper_neon_sqrshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+ opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[] = {
+ gen_helper_neon_uqrshl_b, gen_helper_neon_uqrshl_h,
+ gen_helper_neon_uqrshl_s, gen_helper_neon_uqrshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+ opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+ uint64_t max = MAKE_64BIT_MASK(0, 8 << esz);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(tmp, a, b);
+ tcg_gen_umin_i64(res, tmp, tcg_constant_i64(max));
+ tcg_gen_xor_i64(tmp, tmp, res);
+ tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_uqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(t, a, b);
+ tcg_gen_movcond_i64(TCG_COND_LTU, res, t, a,
+ tcg_constant_i64(UINT64_MAX), t);
+ tcg_gen_xor_i64(t, t, res);
+ tcg_gen_or_i64(qc, qc, t);
+}
+
+static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
TCGv_vec a, TCGv_vec b)
{
TCGv_vec x = tcg_temp_new_vec_matching(t);
tcg_gen_add_vec(vece, x, a, b);
tcg_gen_usadd_vec(vece, t, a, b);
- tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
- tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_gen_xor_vec(vece, x, x, t);
+ tcg_gen_or_vec(vece, qc, qc, x);
}
void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
static const TCGOpcode vecop_list[] = {
- INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+ INDEX_op_usadd_vec, INDEX_op_add_vec, 0
};
static const GVecGen4 ops[4] = {
{ .fniv = gen_uqadd_vec,
@@ -1250,30 +1359,68 @@ void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
.opt_opc = vecop_list,
.vece = MO_32 },
{ .fniv = gen_uqadd_vec,
+ .fni8 = gen_uqadd_d,
.fno = gen_helper_gvec_uqadd_d,
.write_aofs = true,
.opt_opc = vecop_list,
.vece = MO_64 },
};
+
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
-static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_sqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+ int64_t max = MAKE_64BIT_MASK(0, (8 << esz) - 1);
+ int64_t min = -1ll - max;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(tmp, a, b);
+ tcg_gen_smin_i64(res, tmp, tcg_constant_i64(max));
+ tcg_gen_smax_i64(res, res, tcg_constant_i64(min));
+ tcg_gen_xor_i64(tmp, tmp, res);
+ tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_sqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(t0, a, b);
+
+ /* Compute signed overflow indication into T1 */
+ tcg_gen_xor_i64(t1, a, b);
+ tcg_gen_xor_i64(t2, t0, a);
+ tcg_gen_andc_i64(t1, t2, t1);
+
+ /* Compute saturated value into T2 */
+ tcg_gen_sari_i64(t2, a, 63);
+ tcg_gen_xori_i64(t2, t2, INT64_MAX);
+
+ tcg_gen_movcond_i64(TCG_COND_LT, res, t1, tcg_constant_i64(0), t2, t0);
+ tcg_gen_xor_i64(t0, t0, res);
+ tcg_gen_or_i64(qc, qc, t0);
+}
+
+static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
TCGv_vec a, TCGv_vec b)
{
TCGv_vec x = tcg_temp_new_vec_matching(t);
tcg_gen_add_vec(vece, x, a, b);
tcg_gen_ssadd_vec(vece, t, a, b);
- tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
- tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_gen_xor_vec(vece, x, x, t);
+ tcg_gen_or_vec(vece, qc, qc, x);
}
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
static const TCGOpcode vecop_list[] = {
- INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+ INDEX_op_ssadd_vec, INDEX_op_add_vec, 0
};
static const GVecGen4 ops[4] = {
{ .fniv = gen_sqadd_vec,
@@ -1292,30 +1439,53 @@ void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
.write_aofs = true,
.vece = MO_32 },
{ .fniv = gen_sqadd_vec,
+ .fni8 = gen_sqadd_d,
.fno = gen_helper_gvec_sqadd_d,
.opt_opc = vecop_list,
.write_aofs = true,
.vece = MO_64 },
};
+
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
-static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_uqsub_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(tmp, a, b);
+ tcg_gen_smax_i64(res, tmp, tcg_constant_i64(0));
+ tcg_gen_xor_i64(tmp, tmp, res);
+ tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_uqsub_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(t, a, b);
+ tcg_gen_movcond_i64(TCG_COND_LTU, res, a, b, tcg_constant_i64(0), t);
+ tcg_gen_xor_i64(t, t, res);
+ tcg_gen_or_i64(qc, qc, t);
+}
+
+static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
TCGv_vec a, TCGv_vec b)
{
TCGv_vec x = tcg_temp_new_vec_matching(t);
tcg_gen_sub_vec(vece, x, a, b);
tcg_gen_ussub_vec(vece, t, a, b);
- tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
- tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_gen_xor_vec(vece, x, x, t);
+ tcg_gen_or_vec(vece, qc, qc, x);
}
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
static const TCGOpcode vecop_list[] = {
- INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+ INDEX_op_ussub_vec, INDEX_op_sub_vec, 0
};
static const GVecGen4 ops[4] = {
{ .fniv = gen_uqsub_vec,
@@ -1334,30 +1504,68 @@ void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
.write_aofs = true,
.vece = MO_32 },
{ .fniv = gen_uqsub_vec,
+ .fni8 = gen_uqsub_d,
.fno = gen_helper_gvec_uqsub_d,
.opt_opc = vecop_list,
.write_aofs = true,
.vece = MO_64 },
};
+
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
-static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_sqsub_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+ int64_t max = MAKE_64BIT_MASK(0, (8 << esz) - 1);
+ int64_t min = -1ll - max;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(tmp, a, b);
+ tcg_gen_smin_i64(res, tmp, tcg_constant_i64(max));
+ tcg_gen_smax_i64(res, res, tcg_constant_i64(min));
+ tcg_gen_xor_i64(tmp, tmp, res);
+ tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_sqsub_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(t0, a, b);
+
+ /* Compute signed overflow indication into T1 */
+ tcg_gen_xor_i64(t1, a, b);
+ tcg_gen_xor_i64(t2, t0, a);
+ tcg_gen_and_i64(t1, t1, t2);
+
+ /* Compute saturated value into T2 */
+ tcg_gen_sari_i64(t2, a, 63);
+ tcg_gen_xori_i64(t2, t2, INT64_MAX);
+
+ tcg_gen_movcond_i64(TCG_COND_LT, res, t1, tcg_constant_i64(0), t2, t0);
+ tcg_gen_xor_i64(t0, t0, res);
+ tcg_gen_or_i64(qc, qc, t0);
+}
+
+static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
TCGv_vec a, TCGv_vec b)
{
TCGv_vec x = tcg_temp_new_vec_matching(t);
tcg_gen_sub_vec(vece, x, a, b);
tcg_gen_sssub_vec(vece, t, a, b);
- tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
- tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_gen_xor_vec(vece, x, x, t);
+ tcg_gen_or_vec(vece, qc, qc, x);
}
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
static const TCGOpcode vecop_list[] = {
- INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+ INDEX_op_sssub_vec, INDEX_op_sub_vec, 0
};
static const GVecGen4 ops[4] = {
{ .fniv = gen_sqsub_vec,
@@ -1376,11 +1584,14 @@ void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
.write_aofs = true,
.vece = MO_32 },
{ .fniv = gen_sqsub_vec,
+ .fni8 = gen_sqsub_d,
.fno = gen_helper_gvec_sqsub_d,
.opt_opc = vecop_list,
.write_aofs = true,
.vece = MO_64 },
};
+
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
@@ -1670,3 +1881,435 @@ void gen_gvec_uminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
tcg_debug_assert(vece <= MO_32);
tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
}
+
+static void gen_shadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(t, a, b);
+ tcg_gen_vec_sar8i_i64(a, a, 1);
+ tcg_gen_vec_sar8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_add8_i64(d, a, b);
+ tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_shadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(t, a, b);
+ tcg_gen_vec_sar16i_i64(a, a, 1);
+ tcg_gen_vec_sar16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_add16_i64(d, a, b);
+ tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_shadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_and_i32(t, a, b);
+ tcg_gen_sari_i32(a, a, 1);
+ tcg_gen_sari_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_add_i32(d, a, b);
+ tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_shadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_and_vec(vece, t, a, b);
+ tcg_gen_sari_vec(vece, a, a, 1);
+ tcg_gen_sari_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_add_vec(vece, d, a, b);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 g[] = {
+ { .fni8 = gen_shadd8_i64,
+ .fniv = gen_shadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_shadd16_i64,
+ .fniv = gen_shadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_shadd_i32,
+ .fniv = gen_shadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ tcg_debug_assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(t, a, b);
+ tcg_gen_vec_shr8i_i64(a, a, 1);
+ tcg_gen_vec_shr8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_add8_i64(d, a, b);
+ tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_uhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(t, a, b);
+ tcg_gen_vec_shr16i_i64(a, a, 1);
+ tcg_gen_vec_shr16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_add16_i64(d, a, b);
+ tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_uhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_and_i32(t, a, b);
+ tcg_gen_shri_i32(a, a, 1);
+ tcg_gen_shri_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_add_i32(d, a, b);
+ tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_uhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_and_vec(vece, t, a, b);
+ tcg_gen_shri_vec(vece, a, a, 1);
+ tcg_gen_shri_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_add_vec(vece, d, a, b);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 g[] = {
+ { .fni8 = gen_uhadd8_i64,
+ .fniv = gen_uhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_uhadd16_i64,
+ .fniv = gen_uhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_uhadd_i32,
+ .fniv = gen_uhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ tcg_debug_assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_shsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_sar8i_i64(a, a, 1);
+ tcg_gen_vec_sar8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_sub8_i64(d, a, b);
+ tcg_gen_vec_sub8_i64(d, d, t);
+}
+
+static void gen_shsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_sar16i_i64(a, a, 1);
+ tcg_gen_vec_sar16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_sub16_i64(d, a, b);
+ tcg_gen_vec_sub16_i64(d, d, t);
+}
+
+static void gen_shsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andc_i32(t, b, a);
+ tcg_gen_sari_i32(a, a, 1);
+ tcg_gen_sari_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_sub_i32(d, a, b);
+ tcg_gen_sub_i32(d, d, t);
+}
+
+static void gen_shsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_andc_vec(vece, t, b, a);
+ tcg_gen_sari_vec(vece, a, a, 1);
+ tcg_gen_sari_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_sub_vec(vece, d, a, b);
+ tcg_gen_sub_vec(vece, d, d, t);
+}
+
+void gen_gvec_shsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 g[4] = {
+ { .fni8 = gen_shsub8_i64,
+ .fniv = gen_shsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_shsub16_i64,
+ .fniv = gen_shsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_shsub_i32,
+ .fniv = gen_shsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uhsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_shr8i_i64(a, a, 1);
+ tcg_gen_vec_shr8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_sub8_i64(d, a, b);
+ tcg_gen_vec_sub8_i64(d, d, t);
+}
+
+static void gen_uhsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_andc_i64(t, b, a);
+ tcg_gen_vec_shr16i_i64(a, a, 1);
+ tcg_gen_vec_shr16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_sub16_i64(d, a, b);
+ tcg_gen_vec_sub16_i64(d, d, t);
+}
+
+static void gen_uhsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_andc_i32(t, b, a);
+ tcg_gen_shri_i32(a, a, 1);
+ tcg_gen_shri_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_sub_i32(d, a, b);
+ tcg_gen_sub_i32(d, d, t);
+}
+
+static void gen_uhsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_andc_vec(vece, t, b, a);
+ tcg_gen_shri_vec(vece, a, a, 1);
+ tcg_gen_shri_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_sub_vec(vece, d, a, b);
+ tcg_gen_sub_vec(vece, d, d, t);
+}
+
+void gen_gvec_uhsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 g[4] = {
+ { .fni8 = gen_uhsub8_i64,
+ .fniv = gen_uhsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_uhsub16_i64,
+ .fniv = gen_uhsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_uhsub_i32,
+ .fniv = gen_uhsub_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_srhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_or_i64(t, a, b);
+ tcg_gen_vec_sar8i_i64(a, a, 1);
+ tcg_gen_vec_sar8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_add8_i64(d, a, b);
+ tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_srhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_or_i64(t, a, b);
+ tcg_gen_vec_sar16i_i64(a, a, 1);
+ tcg_gen_vec_sar16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_add16_i64(d, a, b);
+ tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_srhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_or_i32(t, a, b);
+ tcg_gen_sari_i32(a, a, 1);
+ tcg_gen_sari_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_add_i32(d, a, b);
+ tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_srhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_or_vec(vece, t, a, b);
+ tcg_gen_sari_vec(vece, a, a, 1);
+ tcg_gen_sari_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_add_vec(vece, d, a, b);
+ tcg_gen_add_vec(vece, d, d, t);
+}
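+
+/*
+ * Rounding halving add uses the dual carry identity
+ *   (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1)
+ * since the rounded carry out of bit 0 is set when either low bit is.
+ */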
+
+void gen_gvec_srhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 g[] = {
+ { .fni8 = gen_srhadd8_i64,
+ .fniv = gen_srhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_srhadd16_i64,
+ .fniv = gen_srhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_srhadd_i32,
+ .fniv = gen_srhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_urhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_or_i64(t, a, b);
+ tcg_gen_vec_shr8i_i64(a, a, 1);
+ tcg_gen_vec_shr8i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_add8_i64(d, a, b);
+ tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_urhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_or_i64(t, a, b);
+ tcg_gen_vec_shr16i_i64(a, a, 1);
+ tcg_gen_vec_shr16i_i64(b, b, 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_add16_i64(d, a, b);
+ tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_urhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_or_i32(t, a, b);
+ tcg_gen_shri_i32(a, a, 1);
+ tcg_gen_shri_i32(b, b, 1);
+ tcg_gen_andi_i32(t, t, 1);
+ tcg_gen_add_i32(d, a, b);
+ tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_urhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_or_vec(vece, t, a, b);
+ tcg_gen_shri_vec(vece, a, a, 1);
+ tcg_gen_shri_vec(vece, b, b, 1);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+ tcg_gen_add_vec(vece, d, a, b);
+ tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 g[] = {
+ { .fni8 = gen_urhadd8_i64,
+ .fniv = gen_urhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_urhadd16_i64,
+ .fniv = gen_urhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_urhadd_i32,
+ .fniv = gen_urhadd_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ };
+ assert(vece <= MO_32);
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
diff --git a/target/arm/tcg/gengvec64.c b/target/arm/tcg/gengvec64.c
index 093b498b13..2617cde0a5 100644
--- a/target/arm/tcg/gengvec64.c
+++ b/target/arm/tcg/gengvec64.c
@@ -188,3 +188,184 @@ void gen_gvec_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}
+/*
+ * Set @res to the correctly saturated result.
+ * Set @qc non-zero if saturation occurred.
+ */
+void gen_suqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+ TCGv_i64 max = tcg_constant_i64((1ull << ((8 << esz) - 1)) - 1);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(t, a, b);
+ tcg_gen_smin_i64(res, t, max);
+ tcg_gen_xor_i64(t, t, res);
+ tcg_gen_or_i64(qc, qc, t);
+}
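+
+/*
+ * Only the upper clamp is needed above: the callers pass @a
+ * sign-extended and @b zero-extended, so the wide sum can exceed the
+ * element's signed maximum but never drop below its minimum.
+ */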
+
+void gen_suqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 max = tcg_constant_i64(INT64_MAX);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ /* Maximum value that can be added to @a without overflow. */
+ tcg_gen_sub_i64(t, max, a);
+
+ /* Constrain addend so that the next addition never overflows. */
+ tcg_gen_umin_i64(t, t, b);
+ tcg_gen_add_i64(res, a, t);
+
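+    /* Compute QC by checking whether the addend was clamped. */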
+ tcg_gen_xor_i64(t, t, b);
+ tcg_gen_or_i64(qc, qc, t);
+}
+
+static void gen_suqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec max =
+ tcg_constant_vec_matching(t, vece, (1ull << ((8 << vece) - 1)) - 1);
+ TCGv_vec u = tcg_temp_new_vec_matching(t);
+
+ /* Maximum value that can be added to @a without overflow. */
+ tcg_gen_sub_vec(vece, u, max, a);
+
+ /* Constrain addend so that the next addition never overflows. */
+ tcg_gen_umin_vec(vece, u, u, b);
+ tcg_gen_add_vec(vece, t, u, a);
+
+ /* Compute QC by comparing the adjusted @b. */
+ tcg_gen_xor_vec(vece, u, u, b);
+ tcg_gen_or_vec(vece, qc, qc, u);
+}
+
+void gen_gvec_suqadd_qc(unsigned vece, uint32_t rd_ofs,
+ uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_add_vec, INDEX_op_sub_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_suqadd_vec,
+ .fno = gen_helper_gvec_suqadd_b,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_8 },
+ { .fniv = gen_suqadd_vec,
+ .fno = gen_helper_gvec_suqadd_h,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_16 },
+ { .fniv = gen_suqadd_vec,
+ .fno = gen_helper_gvec_suqadd_s,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_32 },
+ { .fniv = gen_suqadd_vec,
+ .fni8 = gen_suqadd_d,
+ .fno = gen_helper_gvec_suqadd_d,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+ rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+void gen_usqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+ TCGv_i64 max = tcg_constant_i64(MAKE_64BIT_MASK(0, 8 << esz));
+ TCGv_i64 zero = tcg_constant_i64(0);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(tmp, a, b);
+ tcg_gen_smin_i64(res, tmp, max);
+ tcg_gen_smax_i64(res, res, zero);
+ tcg_gen_xor_i64(tmp, tmp, res);
+ tcg_gen_or_i64(qc, qc, tmp);
+}
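+
+/*
+ * Both clamps are needed above: @b arrives sign-extended, so the wide
+ * sum may underflow below zero or overflow the unsigned maximum.
+ */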
+
+void gen_usqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 tneg = tcg_temp_new_i64();
+ TCGv_i64 tpos = tcg_temp_new_i64();
+ TCGv_i64 max = tcg_constant_i64(UINT64_MAX);
+ TCGv_i64 zero = tcg_constant_i64(0);
+
+ tcg_gen_add_i64(tmp, a, b);
+
+ /* If @b is positive, saturate if (a + b) < a, aka unsigned overflow. */
+ tcg_gen_movcond_i64(TCG_COND_LTU, tpos, tmp, a, max, tmp);
+
+ /* If @b is negative, saturate if a < -b, ie subtraction is negative. */
+ tcg_gen_neg_i64(tneg, b);
+ tcg_gen_movcond_i64(TCG_COND_LTU, tneg, a, tneg, zero, tmp);
+
+ /* Select correct result from sign of @b. */
+ tcg_gen_movcond_i64(TCG_COND_LT, res, b, zero, tneg, tpos);
+ tcg_gen_xor_i64(tmp, tmp, res);
+ tcg_gen_or_i64(qc, qc, tmp);
+}
+
+static void gen_usqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec u = tcg_temp_new_vec_matching(t);
+ TCGv_vec z = tcg_constant_vec_matching(t, vece, 0);
+
+ /* Compute unsigned saturation of add for +b and sub for -b. */
+ tcg_gen_neg_vec(vece, t, b);
+ tcg_gen_usadd_vec(vece, u, a, b);
+ tcg_gen_ussub_vec(vece, t, a, t);
+
+ /* Select the correct result depending on the sign of b. */
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, b, z, t, u);
+
+ /* Compute QC by comparing against the non-saturated result. */
+ tcg_gen_add_vec(vece, u, a, b);
+ tcg_gen_xor_vec(vece, u, u, t);
+ tcg_gen_or_vec(vece, qc, qc, u);
+}
+
+void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs,
+ uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_add_vec,
+ INDEX_op_usadd_vec, INDEX_op_ussub_vec,
+ INDEX_op_cmpsel_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_usqadd_vec,
+ .fno = gen_helper_gvec_usqadd_b,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_8 },
+ { .fniv = gen_usqadd_vec,
+ .fno = gen_helper_gvec_usqadd_h,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_16 },
+ { .fniv = gen_usqadd_vec,
+ .fno = gen_helper_gvec_usqadd_s,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_32 },
+ { .fniv = gen_usqadd_vec,
+ .fni8 = gen_usqadd_d,
+ .fno = gen_helper_gvec_usqadd_d,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+ rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
diff --git a/target/arm/tcg/neon-dp.decode b/target/arm/tcg/neon-dp.decode
index fd3a01bfa0..788578c8fa 100644
--- a/target/arm/tcg/neon-dp.decode
+++ b/target/arm/tcg/neon-dp.decode
@@ -102,37 +102,12 @@ VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same
VSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 0 .... @3same_rev
VSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 0 .... @3same_rev
-
-# Insns operating on 64-bit elements (size!=0b11 handled elsewhere)
-# The _rev suffix indicates that Vn and Vm are reversed (as explained
-# by the comment for the @3same_rev format).
-@3same_64_rev .... ... . . . 11 .... .... .... . q:1 . . .... \
- &3same vm=%vn_dp vn=%vm_dp vd=%vd_dp size=3
-
-{
- VQSHL_S64_3s 1111 001 0 0 . .. .... .... 0100 . . . 1 .... @3same_64_rev
- VQSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 1 .... @3same_rev
-}
-{
- VQSHL_U64_3s 1111 001 1 0 . .. .... .... 0100 . . . 1 .... @3same_64_rev
- VQSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 1 .... @3same_rev
-}
-{
- VRSHL_S64_3s 1111 001 0 0 . .. .... .... 0101 . . . 0 .... @3same_64_rev
- VRSHL_S_3s 1111 001 0 0 . .. .... .... 0101 . . . 0 .... @3same_rev
-}
-{
- VRSHL_U64_3s 1111 001 1 0 . .. .... .... 0101 . . . 0 .... @3same_64_rev
- VRSHL_U_3s 1111 001 1 0 . .. .... .... 0101 . . . 0 .... @3same_rev
-}
-{
- VQRSHL_S64_3s 1111 001 0 0 . .. .... .... 0101 . . . 1 .... @3same_64_rev
- VQRSHL_S_3s 1111 001 0 0 . .. .... .... 0101 . . . 1 .... @3same_rev
-}
-{
- VQRSHL_U64_3s 1111 001 1 0 . .. .... .... 0101 . . . 1 .... @3same_64_rev
- VQRSHL_U_3s 1111 001 1 0 . .. .... .... 0101 . . . 1 .... @3same_rev
-}
+VQSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 1 .... @3same_rev
+VQSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 1 .... @3same_rev
+VRSHL_S_3s 1111 001 0 0 . .. .... .... 0101 . . . 0 .... @3same_rev
+VRSHL_U_3s 1111 001 1 0 . .. .... .... 0101 . . . 0 .... @3same_rev
+VQRSHL_S_3s 1111 001 0 0 . .. .... .... 0101 . . . 1 .... @3same_rev
+VQRSHL_U_3s 1111 001 1 0 . .. .... .... 0101 . . . 1 .... @3same_rev
VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
diff --git a/target/arm/tcg/neon_helper.c b/target/arm/tcg/neon_helper.c
index a0b51c8809..082bfd88ad 100644
--- a/target/arm/tcg/neon_helper.c
+++ b/target/arm/tcg/neon_helper.c
@@ -6,10 +6,11 @@
*
* This code is licensed under the GNU GPL v2.
*/
-#include "qemu/osdep.h"
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
@@ -117,6 +118,29 @@ NEON_VOP_BODY(vtype, n)
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
+#define NEON_GVEC_VOP2(name, vtype) \
+void HELPER(name)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ vtype *d = vd, *n = vn, *m = vm; \
+ for (i = 0; i < opr_sz / sizeof(vtype); i++) { \
+ NEON_FN(d[i], n[i], m[i]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+#define NEON_GVEC_VOP2_ENV(name, vtype) \
+void HELPER(name)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ vtype *d = vd, *n = vn, *m = vm; \
+ CPUARMState *env = venv; \
+ for (i = 0; i < opr_sz / sizeof(vtype); i++) { \
+ NEON_FN(d[i], n[i], m[i]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
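+
+/*
+ * E.g. NEON_GVEC_VOP2(gvec_srshl_b, int8_t) below expands to a helper
+ * with the standard gvec signature (vd, vn, vm, desc) that applies the
+ * current NEON_FN per element and then clears the tail bytes.
+ */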
+
/* Pairwise operations. */
/* For 32-bit elements each segment only contains a single element, so
the elementwise and pairwise operations are the same. */
@@ -155,414 +179,6 @@ uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
return arg; \
}
-
-#define NEON_USAT(dest, src1, src2, type) do { \
- uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
- if (tmp != (type)tmp) { \
- SET_QC(); \
- dest = ~0; \
- } else { \
- dest = tmp; \
- }} while(0)
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP_ENV(qadd_u8, neon_u8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP_ENV(qadd_u16, neon_u16, 2)
-#undef NEON_FN
-#undef NEON_USAT
-
-uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- uint32_t res = a + b;
- if (res < a) {
- SET_QC();
- res = ~0;
- }
- return res;
-}
-
-uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
-{
- uint64_t res;
-
- res = src1 + src2;
- if (res < src1) {
- SET_QC();
- res = ~(uint64_t)0;
- }
- return res;
-}
-
-#define NEON_SSAT(dest, src1, src2, type) do { \
- int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
- if (tmp != (type)tmp) { \
- SET_QC(); \
- if (src2 > 0) { \
- tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
- } else { \
- tmp = 1 << (sizeof(type) * 8 - 1); \
- } \
- } \
- dest = tmp; \
- } while(0)
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP_ENV(qadd_s8, neon_s8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP_ENV(qadd_s16, neon_s16, 2)
-#undef NEON_FN
-#undef NEON_SSAT
-
-uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- uint32_t res = a + b;
- if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
- SET_QC();
- res = ~(((int32_t)a >> 31) ^ SIGNBIT);
- }
- return res;
-}
-
-uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
-{
- uint64_t res;
-
- res = src1 + src2;
- if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
- SET_QC();
- res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
- }
- return res;
-}
-
-/* Unsigned saturating accumulate of signed value
- *
- * Op1/Rn is treated as signed
- * Op2/Rd is treated as unsigned
- *
- * Explicit casting is used to ensure the correct sign extension of
- * inputs. The result is treated as a unsigned value and saturated as such.
- *
- * We use a macro for the 8/16 bit cases which expects signed integers of va,
- * vb, and vr for interim calculation and an unsigned 32 bit result value r.
- */
-
-#define USATACC(bits, shift) \
- do { \
- va = sextract32(a, shift, bits); \
- vb = extract32(b, shift, bits); \
- vr = va + vb; \
- if (vr > UINT##bits##_MAX) { \
- SET_QC(); \
- vr = UINT##bits##_MAX; \
- } else if (vr < 0) { \
- SET_QC(); \
- vr = 0; \
- } \
- r = deposit32(r, shift, bits, vr); \
- } while (0)
-
-uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- int16_t va, vb, vr;
- uint32_t r = 0;
-
- USATACC(8, 0);
- USATACC(8, 8);
- USATACC(8, 16);
- USATACC(8, 24);
- return r;
-}
-
-uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- int32_t va, vb, vr;
- uint64_t r = 0;
-
- USATACC(16, 0);
- USATACC(16, 16);
- return r;
-}
-
-#undef USATACC
-
-uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- int64_t va = (int32_t)a;
- int64_t vb = (uint32_t)b;
- int64_t vr = va + vb;
- if (vr > UINT32_MAX) {
- SET_QC();
- vr = UINT32_MAX;
- } else if (vr < 0) {
- SET_QC();
- vr = 0;
- }
- return vr;
-}
-
-uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
-{
- uint64_t res;
- res = a + b;
- /* We only need to look at the pattern of SIGN bits to detect
- * +ve/-ve saturation
- */
- if (~a & b & ~res & SIGNBIT64) {
- SET_QC();
- res = UINT64_MAX;
- } else if (a & ~b & res & SIGNBIT64) {
- SET_QC();
- res = 0;
- }
- return res;
-}
-
-/* Signed saturating accumulate of unsigned value
- *
- * Op1/Rn is treated as unsigned
- * Op2/Rd is treated as signed
- *
- * The result is treated as a signed value and saturated as such
- *
- * We use a macro for the 8/16 bit cases which expects signed integers of va,
- * vb, and vr for interim calculation and an unsigned 32 bit result value r.
- */
-
-#define SSATACC(bits, shift) \
- do { \
- va = extract32(a, shift, bits); \
- vb = sextract32(b, shift, bits); \
- vr = va + vb; \
- if (vr > INT##bits##_MAX) { \
- SET_QC(); \
- vr = INT##bits##_MAX; \
- } else if (vr < INT##bits##_MIN) { \
- SET_QC(); \
- vr = INT##bits##_MIN; \
- } \
- r = deposit32(r, shift, bits, vr); \
- } while (0)
-
-uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- int16_t va, vb, vr;
- uint32_t r = 0;
-
- SSATACC(8, 0);
- SSATACC(8, 8);
- SSATACC(8, 16);
- SSATACC(8, 24);
- return r;
-}
-
-uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- int32_t va, vb, vr;
- uint32_t r = 0;
-
- SSATACC(16, 0);
- SSATACC(16, 16);
-
- return r;
-}
-
-#undef SSATACC
-
-uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- int64_t res;
- int64_t op1 = (uint32_t)a;
- int64_t op2 = (int32_t)b;
- res = op1 + op2;
- if (res > INT32_MAX) {
- SET_QC();
- res = INT32_MAX;
- } else if (res < INT32_MIN) {
- SET_QC();
- res = INT32_MIN;
- }
- return res;
-}
-
-uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
-{
- uint64_t res;
- res = a + b;
- /* We only need to look at the pattern of SIGN bits to detect an overflow */
- if (((a & res)
- | (~b & res)
- | (a & ~b)) & SIGNBIT64) {
- SET_QC();
- res = INT64_MAX;
- }
- return res;
-}
-
-
-#define NEON_USAT(dest, src1, src2, type) do { \
- uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
- if (tmp != (type)tmp) { \
- SET_QC(); \
- dest = 0; \
- } else { \
- dest = tmp; \
- }} while(0)
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP_ENV(qsub_u8, neon_u8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP_ENV(qsub_u16, neon_u16, 2)
-#undef NEON_FN
-#undef NEON_USAT
-
-uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- uint32_t res = a - b;
- if (res > a) {
- SET_QC();
- res = 0;
- }
- return res;
-}
-
-uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
-{
- uint64_t res;
-
- if (src1 < src2) {
- SET_QC();
- res = 0;
- } else {
- res = src1 - src2;
- }
- return res;
-}
-
-#define NEON_SSAT(dest, src1, src2, type) do { \
- int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
- if (tmp != (type)tmp) { \
- SET_QC(); \
- if (src2 < 0) { \
- tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
- } else { \
- tmp = 1 << (sizeof(type) * 8 - 1); \
- } \
- } \
- dest = tmp; \
- } while(0)
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP_ENV(qsub_s8, neon_s8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP_ENV(qsub_s16, neon_s16, 2)
-#undef NEON_FN
-#undef NEON_SSAT
-
-uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
-{
- uint32_t res = a - b;
- if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
- SET_QC();
- res = ~(((int32_t)a >> 31) ^ SIGNBIT);
- }
- return res;
-}
-
-uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
-{
- uint64_t res;
-
- res = src1 - src2;
- if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
- SET_QC();
- res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
- }
- return res;
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
-NEON_VOP(hadd_s8, neon_s8, 4)
-NEON_VOP(hadd_u8, neon_u8, 4)
-NEON_VOP(hadd_s16, neon_s16, 2)
-NEON_VOP(hadd_u16, neon_u16, 2)
-#undef NEON_FN
-
-int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
-{
- int32_t dest;
-
- dest = (src1 >> 1) + (src2 >> 1);
- if (src1 & src2 & 1)
- dest++;
- return dest;
-}
-
-uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
-{
- uint32_t dest;
-
- dest = (src1 >> 1) + (src2 >> 1);
- if (src1 & src2 & 1)
- dest++;
- return dest;
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
-NEON_VOP(rhadd_s8, neon_s8, 4)
-NEON_VOP(rhadd_u8, neon_u8, 4)
-NEON_VOP(rhadd_s16, neon_s16, 2)
-NEON_VOP(rhadd_u16, neon_u16, 2)
-#undef NEON_FN
-
-int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
-{
- int32_t dest;
-
- dest = (src1 >> 1) + (src2 >> 1);
- if ((src1 | src2) & 1)
- dest++;
- return dest;
-}
-
-uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
-{
- uint32_t dest;
-
- dest = (src1 >> 1) + (src2 >> 1);
- if ((src1 | src2) & 1)
- dest++;
- return dest;
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
-NEON_VOP(hsub_s8, neon_s8, 4)
-NEON_VOP(hsub_u8, neon_u8, 4)
-NEON_VOP(hsub_s16, neon_s16, 2)
-NEON_VOP(hsub_u16, neon_u16, 2)
-#undef NEON_FN
-
-int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
-{
- int32_t dest;
-
- dest = (src1 >> 1) - (src2 >> 1);
- if ((~src1) & src2 & 1)
- dest--;
- return dest;
-}
-
-uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
-{
- uint32_t dest;
-
- dest = (src1 >> 1) - (src2 >> 1);
- if ((~src1) & src2 & 1)
- dest--;
- return dest;
-}
-
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
@@ -590,11 +206,23 @@ NEON_VOP(shl_s16, neon_s16, 2)
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
NEON_VOP(rshl_s8, neon_s8, 4)
+NEON_GVEC_VOP2(gvec_srshl_b, int8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
NEON_VOP(rshl_s16, neon_s16, 2)
+NEON_GVEC_VOP2(gvec_srshl_h, int16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, true, NULL))
+NEON_GVEC_VOP2(gvec_srshl_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_d(src1, (int8_t)src2, true, NULL))
+NEON_GVEC_VOP2(gvec_srshl_d, int64_t)
#undef NEON_FN
uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift)
@@ -610,11 +238,23 @@ uint64_t HELPER(neon_rshl_s64)(uint64_t val, uint64_t shift)
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
NEON_VOP(rshl_u8, neon_u8, 4)
+NEON_GVEC_VOP2(gvec_urshl_b, uint8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
NEON_VOP(rshl_u16, neon_u16, 2)
+NEON_GVEC_VOP2(gvec_urshl_h, uint16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, true, NULL))
+NEON_GVEC_VOP2(gvec_urshl_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_d(src1, (int8_t)src2, true, NULL))
+NEON_GVEC_VOP2(gvec_urshl_d, int64_t)
#undef NEON_FN
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift)
@@ -630,11 +270,23 @@ uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift)
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+NEON_GVEC_VOP2_ENV(neon_uqshl_b, uint8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
+NEON_GVEC_VOP2_ENV(neon_uqshl_h, uint16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_uqshl_s, uint32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_d(src1, (int8_t)src2, false, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_uqshl_d, uint64_t)
#undef NEON_FN
uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
@@ -650,11 +302,23 @@ uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+NEON_GVEC_VOP2_ENV(neon_sqshl_b, int8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
+NEON_GVEC_VOP2_ENV(neon_sqshl_h, int16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_sqshl_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_d(src1, (int8_t)src2, false, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_sqshl_d, int64_t)
#undef NEON_FN
uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
@@ -690,11 +354,23 @@ uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+NEON_GVEC_VOP2_ENV(neon_uqrshl_b, uint8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
+NEON_GVEC_VOP2_ENV(neon_uqrshl_h, uint16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, true, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_uqrshl_s, uint32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_d(src1, (int8_t)src2, true, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_uqrshl_d, uint64_t)
#undef NEON_FN
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
@@ -710,11 +386,23 @@ uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+NEON_GVEC_VOP2_ENV(neon_sqrshl_b, int8_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
+NEON_GVEC_VOP2_ENV(neon_sqrshl_h, int16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, true, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_sqrshl_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_d(src1, (int8_t)src2, true, env->vfp.qc))
+NEON_GVEC_VOP2_ENV(neon_sqrshl_d, int64_t)
#undef NEON_FN
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index c199b69fbf..c083e5cfb8 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -409,6 +409,60 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
#endif
}
+void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * WFI in the user-mode emulator is technically permitted but not
+ * something any real-world code would do. AArch64 Linux kernels
+ * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
+ * AArch32 kernels don't trap it so it will delay a bit.
+ * For QEMU, make it NOP here, because trying to raise EXCP_HLT
+ * would trigger an abort.
+ */
+ return;
+#else
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ int target_el = check_wfx_trap(env, false);
+ /* The WFIT should time out when CNTVCT_EL0 >= the specified value. */
+ uint64_t cntval = gt_get_countervalue(env);
+ uint64_t offset = gt_virt_cnt_offset(env);
+ uint64_t cntvct = cntval - offset;
+ uint64_t nexttick;
+
+ if (cpu_has_work(cs) || cntvct >= timeout) {
+ /*
+ * Don't bother to go into our "low power state" if
+ * we would just wake up immediately.
+ */
+ return;
+ }
+
+ if (target_el) {
+ env->pc -= 4;
+ raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, false),
+ target_el);
+ }
+
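+    /* Convert the CNTVCT_EL0 deadline back to the raw counter value. */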
+ if (uadd64_overflow(timeout, offset, &nexttick)) {
+ nexttick = UINT64_MAX;
+ }
+ if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
+ /*
+ * If the timeout is too long for the signed 64-bit range
+ * of a QEMUTimer, let it expire early.
+ */
+ timer_mod_ns(cpu->wfxt_timer, INT64_MAX);
+ } else {
+ timer_mod(cpu->wfxt_timer, nexttick);
+ }
+ cs->exception_index = EXCP_HLT;
+ cs->halted = 1;
+ cpu_loop_exit(cs);
+#endif
+}
+
void HELPER(wfe)(CPUARMState *env)
{
/* This is a hint instruction that is semantically different
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 9167e4d0bd..93543da39c 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -724,19 +724,6 @@ static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
-/* Expand a 3-operand + qc + operation using an out-of-line helper. */
-static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
- int rm, gen_helper_gvec_3_ptr *fn)
-{
- TCGv_ptr qc_ptr = tcg_temp_new_ptr();
-
- tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
- tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm), qc_ptr,
- is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
-}
-
/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
int rm, int ra, int data, gen_helper_gvec_4 *fn)
@@ -1363,6 +1350,14 @@ static bool do_gvec_fn3_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
return true;
}
+static bool do_gvec_fn3_no8_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
+{
+ if (a->esz == MO_8) {
+ return false;
+ }
+ return do_gvec_fn3_no64(s, a, fn);
+}
+
static bool do_gvec_fn4(DisasContext *s, arg_qrrrr_e *a, GVecGen4Fn *fn)
{
if (!a->q && a->esz == MO_64) {
@@ -1750,6 +1745,47 @@ static bool trans_WFE(DisasContext *s, arg_WFI *a)
return true;
}
+static bool trans_WFIT(DisasContext *s, arg_WFIT *a)
+{
+ if (!dc_isar_feature(aa64_wfxt, s)) {
+ return false;
+ }
+
+ /*
+ * Because we need to pass the register value to the helper,
+ * it's easier to emit the code now, unlike trans_WFI which
+ * defers it to aarch64_tr_tb_stop(). That means we need to
+ * check ss_active so that single-stepping a WFIT doesn't halt.
+ */
+ if (s->ss_active) {
+ /* Act like a NOP under architectural singlestep */
+ return true;
+ }
+
+ gen_a64_update_pc(s, 4);
+ gen_helper_wfit(tcg_env, cpu_reg(s, a->rd));
+ /* Go back to the main loop to check for interrupts */
+ s->base.is_jmp = DISAS_EXIT;
+ return true;
+}
+
+static bool trans_WFET(DisasContext *s, arg_WFET *a)
+{
+ if (!dc_isar_feature(aa64_wfxt, s)) {
+ return false;
+ }
+
+ /*
+ * We rely here on our WFE implementation being a NOP, so we
+ * don't need to do anything different to handle the WFET timeout
+ * from what trans_WFE does.
+ */
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ s->base.is_jmp = DISAS_WFE;
+ }
+ return true;
+}
+
static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
{
if (s->pauth_active) {
@@ -5060,6 +5096,163 @@ static const FPScalar f_scalar_frsqrts = {
};
TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts)
+static bool do_satacc_s(DisasContext *s, arg_rrr_e *a,
+ MemOp sgn_n, MemOp sgn_m,
+ void (*gen_bhs)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, MemOp),
+ void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 t0, t1, t2, qc;
+ MemOp esz = a->esz;
+
+ if (!fp_access_check(s)) {
+ return true;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ qc = tcg_temp_new_i64();
+ read_vec_element(s, t1, a->rn, 0, esz | sgn_n);
+ read_vec_element(s, t2, a->rm, 0, esz | sgn_m);
+ tcg_gen_ld_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc));
+
+ if (esz == MO_64) {
+ gen_d(t0, qc, t1, t2);
+ } else {
+ gen_bhs(t0, qc, t1, t2, esz);
+ tcg_gen_ext_i64(t0, t0, esz);
+ }
+
+ write_fp_dreg(s, a->rd, t0);
+ tcg_gen_st_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc));
+ return true;
+}
+
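+/*
+ * The two MemOp arguments select the extension applied to each input,
+ * which lets the mixed-sign SUQADD/USQADD forms share this plumbing.
+ */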
+TRANS(SQADD_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqadd_bhs, gen_sqadd_d)
+TRANS(SQSUB_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqsub_bhs, gen_sqsub_d)
+TRANS(UQADD_s, do_satacc_s, a, 0, 0, gen_uqadd_bhs, gen_uqadd_d)
+TRANS(UQSUB_s, do_satacc_s, a, 0, 0, gen_uqsub_bhs, gen_uqsub_d)
+TRANS(SUQADD_s, do_satacc_s, a, MO_SIGN, 0, gen_suqadd_bhs, gen_suqadd_d)
+TRANS(USQADD_s, do_satacc_s, a, 0, MO_SIGN, gen_usqadd_bhs, gen_usqadd_d)
+
+static bool do_int3_scalar_d(DisasContext *s, arg_rrr_e *a,
+ void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ read_vec_element(s, t0, a->rn, 0, MO_64);
+ read_vec_element(s, t1, a->rm, 0, MO_64);
+ fn(t0, t0, t1);
+ write_fp_dreg(s, a->rd, t0);
+ }
+ return true;
+}
+
+TRANS(SSHL_s, do_int3_scalar_d, a, gen_sshl_i64)
+TRANS(USHL_s, do_int3_scalar_d, a, gen_ushl_i64)
+TRANS(SRSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_s64)
+TRANS(URSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_u64)
+TRANS(ADD_s, do_int3_scalar_d, a, tcg_gen_add_i64)
+TRANS(SUB_s, do_int3_scalar_d, a, tcg_gen_sub_i64)
+
+typedef struct ENVScalar2 {
+ NeonGenTwoOpEnvFn *gen_bhs[3];
+ NeonGenTwo64OpEnvFn *gen_d;
+} ENVScalar2;
+
+static bool do_env_scalar2(DisasContext *s, arg_rrr_e *a, const ENVScalar2 *f)
+{
+ if (!fp_access_check(s)) {
+ return true;
+ }
+ if (a->esz == MO_64) {
+ TCGv_i64 t0 = read_fp_dreg(s, a->rn);
+ TCGv_i64 t1 = read_fp_dreg(s, a->rm);
+ f->gen_d(t0, tcg_env, t0, t1);
+ write_fp_dreg(s, a->rd, t0);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, t0, a->rn, 0, a->esz);
+ read_vec_element_i32(s, t1, a->rm, 0, a->esz);
+ f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
+ write_fp_sreg(s, a->rd, t0);
+ }
+ return true;
+}
+
+static const ENVScalar2 f_scalar_sqshl = {
+ { gen_helper_neon_qshl_s8,
+ gen_helper_neon_qshl_s16,
+ gen_helper_neon_qshl_s32 },
+ gen_helper_neon_qshl_s64,
+};
+TRANS(SQSHL_s, do_env_scalar2, a, &f_scalar_sqshl)
+
+static const ENVScalar2 f_scalar_uqshl = {
+ { gen_helper_neon_qshl_u8,
+ gen_helper_neon_qshl_u16,
+ gen_helper_neon_qshl_u32 },
+ gen_helper_neon_qshl_u64,
+};
+TRANS(UQSHL_s, do_env_scalar2, a, &f_scalar_uqshl)
+
+static const ENVScalar2 f_scalar_sqrshl = {
+ { gen_helper_neon_qrshl_s8,
+ gen_helper_neon_qrshl_s16,
+ gen_helper_neon_qrshl_s32 },
+ gen_helper_neon_qrshl_s64,
+};
+TRANS(SQRSHL_s, do_env_scalar2, a, &f_scalar_sqrshl)
+
+static const ENVScalar2 f_scalar_uqrshl = {
+ { gen_helper_neon_qrshl_u8,
+ gen_helper_neon_qrshl_u16,
+ gen_helper_neon_qrshl_u32 },
+ gen_helper_neon_qrshl_u64,
+};
+TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl)
+
+static bool do_env_scalar2_hs(DisasContext *s, arg_rrr_e *a,
+ const ENVScalar2 *f)
+{
+ if (a->esz == MO_16 || a->esz == MO_32) {
+ return do_env_scalar2(s, a, f);
+ }
+ return false;
+}
+
+static const ENVScalar2 f_scalar_sqdmulh = {
+ { NULL, gen_helper_neon_qdmulh_s16, gen_helper_neon_qdmulh_s32 }
+};
+TRANS(SQDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqdmulh)
+
+static const ENVScalar2 f_scalar_sqrdmulh = {
+ { NULL, gen_helper_neon_qrdmulh_s16, gen_helper_neon_qrdmulh_s32 }
+};
+TRANS(SQRDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqrdmulh)
+
+static bool do_cmop_d(DisasContext *s, arg_rrr_e *a, TCGCond cond)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 t0 = read_fp_dreg(s, a->rn);
+ TCGv_i64 t1 = read_fp_dreg(s, a->rm);
+ tcg_gen_negsetcond_i64(cond, t0, t0, t1);
+ write_fp_dreg(s, a->rd, t0);
+ }
+ return true;
+}
+
+TRANS(CMGT_s, do_cmop_d, a, TCG_COND_GT)
+TRANS(CMHI_s, do_cmop_d, a, TCG_COND_GTU)
+TRANS(CMGE_s, do_cmop_d, a, TCG_COND_GE)
+TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU)
+TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ)
+TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE)
+
static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a,
gen_helper_gvec_3_ptr * const fns[3])
{
@@ -5298,6 +5491,68 @@ TRANS(BSL_v, do_bitsel, a->q, a->rd, a->rd, a->rn, a->rm)
TRANS(BIT_v, do_bitsel, a->q, a->rd, a->rm, a->rn, a->rd)
TRANS(BIF_v, do_bitsel, a->q, a->rd, a->rm, a->rd, a->rn)
+TRANS(SQADD_v, do_gvec_fn3, a, gen_gvec_sqadd_qc)
+TRANS(UQADD_v, do_gvec_fn3, a, gen_gvec_uqadd_qc)
+TRANS(SQSUB_v, do_gvec_fn3, a, gen_gvec_sqsub_qc)
+TRANS(UQSUB_v, do_gvec_fn3, a, gen_gvec_uqsub_qc)
+TRANS(SUQADD_v, do_gvec_fn3, a, gen_gvec_suqadd_qc)
+TRANS(USQADD_v, do_gvec_fn3, a, gen_gvec_usqadd_qc)
+
+TRANS(SSHL_v, do_gvec_fn3, a, gen_gvec_sshl)
+TRANS(USHL_v, do_gvec_fn3, a, gen_gvec_ushl)
+TRANS(SRSHL_v, do_gvec_fn3, a, gen_gvec_srshl)
+TRANS(URSHL_v, do_gvec_fn3, a, gen_gvec_urshl)
+TRANS(SQSHL_v, do_gvec_fn3, a, gen_neon_sqshl)
+TRANS(UQSHL_v, do_gvec_fn3, a, gen_neon_uqshl)
+TRANS(SQRSHL_v, do_gvec_fn3, a, gen_neon_sqrshl)
+TRANS(UQRSHL_v, do_gvec_fn3, a, gen_neon_uqrshl)
+
+TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add)
+TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub)
+TRANS(SHADD_v, do_gvec_fn3_no64, a, gen_gvec_shadd)
+TRANS(UHADD_v, do_gvec_fn3_no64, a, gen_gvec_uhadd)
+TRANS(SHSUB_v, do_gvec_fn3_no64, a, gen_gvec_shsub)
+TRANS(UHSUB_v, do_gvec_fn3_no64, a, gen_gvec_uhsub)
+TRANS(SRHADD_v, do_gvec_fn3_no64, a, gen_gvec_srhadd)
+TRANS(URHADD_v, do_gvec_fn3_no64, a, gen_gvec_urhadd)
+TRANS(SMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smax)
+TRANS(UMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umax)
+TRANS(SMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smin)
+TRANS(UMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umin)
+TRANS(SABA_v, do_gvec_fn3_no64, a, gen_gvec_saba)
+TRANS(UABA_v, do_gvec_fn3_no64, a, gen_gvec_uaba)
+TRANS(SABD_v, do_gvec_fn3_no64, a, gen_gvec_sabd)
+TRANS(UABD_v, do_gvec_fn3_no64, a, gen_gvec_uabd)
+TRANS(MUL_v, do_gvec_fn3_no64, a, tcg_gen_gvec_mul)
+TRANS(PMUL_v, do_gvec_op3_ool, a, 0, gen_helper_gvec_pmul_b)
+TRANS(MLA_v, do_gvec_fn3_no64, a, gen_gvec_mla)
+TRANS(MLS_v, do_gvec_fn3_no64, a, gen_gvec_mls)
+
+static bool do_cmop_v(DisasContext *s, arg_qrrr_e *a, TCGCond cond)
+{
+ if (a->esz == MO_64 && !a->q) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ tcg_gen_gvec_cmp(cond, a->esz,
+ vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ a->q ? 16 : 8, vec_full_reg_size(s));
+ }
+ return true;
+}
+
+TRANS(CMGT_v, do_cmop_v, a, TCG_COND_GT)
+TRANS(CMHI_v, do_cmop_v, a, TCG_COND_GTU)
+TRANS(CMGE_v, do_cmop_v, a, TCG_COND_GE)
+TRANS(CMHS_v, do_cmop_v, a, TCG_COND_GEU)
+TRANS(CMEQ_v, do_cmop_v, a, TCG_COND_EQ)
+TRANS(CMTST_v, do_gvec_fn3, a, gen_gvec_cmtst)
+
+TRANS(SQDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqdmulh_qc)
+TRANS(SQRDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmulh_qc)
+
/*
* Advanced SIMD scalar/vector x indexed element
*/
@@ -5405,6 +5660,27 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
TRANS(FMLA_si, do_fmla_scalar_idx, a, false)
TRANS(FMLS_si, do_fmla_scalar_idx, a, true)
+static bool do_env_scalar2_idx_hs(DisasContext *s, arg_rrx_e *a,
+ const ENVScalar2 *f)
+{
+ if (a->esz < MO_16 || a->esz > MO_32) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, t0, a->rn, 0, a->esz);
+ read_vec_element_i32(s, t1, a->rm, a->idx, a->esz);
+ f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
+ write_fp_sreg(s, a->rd, t0);
+ }
+ return true;
+}
+
+TRANS(SQDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqdmulh)
+TRANS(SQRDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqrdmulh)
+
static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a,
gen_helper_gvec_3_ptr * const fns[3])
{
@@ -5501,6 +5777,67 @@ TRANS_FEAT(FMLSL_vi, aa64_fhm, do_fmlal_idx, a, true, false)
TRANS_FEAT(FMLAL2_vi, aa64_fhm, do_fmlal_idx, a, false, true)
TRANS_FEAT(FMLSL2_vi, aa64_fhm, do_fmlal_idx, a, true, true)
+static bool do_int3_vector_idx(DisasContext *s, arg_qrrx_e *a,
+ gen_helper_gvec_3 * const fns[2])
+{
+ assert(a->esz == MO_16 || a->esz == MO_32);
+ if (fp_access_check(s)) {
+ gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, a->idx, fns[a->esz - 1]);
+ }
+ return true;
+}
+
+static gen_helper_gvec_3 * const f_vector_idx_mul[2] = {
+ gen_helper_gvec_mul_idx_h,
+ gen_helper_gvec_mul_idx_s,
+};
+TRANS(MUL_vi, do_int3_vector_idx, a, f_vector_idx_mul)
+
+static bool do_mla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool sub)
+{
+ static gen_helper_gvec_4 * const fns[2][2] = {
+ { gen_helper_gvec_mla_idx_h, gen_helper_gvec_mls_idx_h },
+ { gen_helper_gvec_mla_idx_s, gen_helper_gvec_mls_idx_s },
+ };
+
+ assert(a->esz == MO_16 || a->esz == MO_32);
+ if (fp_access_check(s)) {
+ gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd,
+ a->idx, fns[a->esz - 1][sub]);
+ }
+ return true;
+}
+
+TRANS(MLA_vi, do_mla_vector_idx, a, false)
+TRANS(MLS_vi, do_mla_vector_idx, a, true)
+
+static bool do_int3_qc_vector_idx(DisasContext *s, arg_qrrx_e *a,
+ gen_helper_gvec_4 * const fns[2])
+{
+ assert(a->esz == MO_16 || a->esz == MO_32);
+ if (fp_access_check(s)) {
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ offsetof(CPUARMState, vfp.qc),
+ a->q ? 16 : 8, vec_full_reg_size(s),
+ a->idx, fns[a->esz - 1]);
+ }
+ return true;
+}
+
+static gen_helper_gvec_4 * const f_vector_idx_sqdmulh[2] = {
+ gen_helper_neon_sqdmulh_idx_h,
+ gen_helper_neon_sqdmulh_idx_s,
+};
+TRANS(SQDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqdmulh)
+
+static gen_helper_gvec_4 * const f_vector_idx_sqrdmulh[2] = {
+ gen_helper_neon_sqrdmulh_idx_h,
+ gen_helper_neon_sqrdmulh_idx_s,
+};
+TRANS(SQRDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqrdmulh)
+
/*
* Advanced SIMD scalar pairwise
*/
@@ -5570,6 +5907,132 @@ static bool trans_ADDP_s(DisasContext *s, arg_rr_e *a)
return true;
}
+/*
+ * Floating-point conditional select
+ */
+
+static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a)
+{
+ TCGv_i64 t_true, t_false;
+ DisasCompare64 c;
+
+ switch (a->esz) {
+ case MO_32:
+ case MO_64:
+ break;
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ if (!fp_access_check(s)) {
+ return true;
+ }
+
+ /* Zero extend sreg & hreg inputs to 64 bits now. */
+ t_true = tcg_temp_new_i64();
+ t_false = tcg_temp_new_i64();
+ read_vec_element(s, t_true, a->rn, 0, a->esz);
+ read_vec_element(s, t_false, a->rm, 0, a->esz);
+
+ a64_test_cc(&c, a->cond);
+ tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
+ t_true, t_false);
+
+ /*
+ * Note that sregs & hregs write back zeros to the high bits,
+ * and we've already done the zero-extension.
+ */
+ write_fp_dreg(s, a->rd, t_true);
+ return true;
+}
+
+/*
+ * Floating-point data-processing (3 source)
+ */
+
+static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
+{
+ TCGv_ptr fpst;
+
+ /*
+ * These are fused multiply-add. Note that doing the negations here
+ * as separate steps is correct: an input NaN should come out with
+ * its sign bit flipped if it is a negated input.
+ */
+ switch (a->esz) {
+ case MO_64:
+ if (fp_access_check(s)) {
+ TCGv_i64 tn = read_fp_dreg(s, a->rn);
+ TCGv_i64 tm = read_fp_dreg(s, a->rm);
+ TCGv_i64 ta = read_fp_dreg(s, a->ra);
+
+ if (neg_a) {
+ gen_vfp_negd(ta, ta);
+ }
+ if (neg_n) {
+ gen_vfp_negd(tn, tn);
+ }
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst);
+ write_fp_dreg(s, a->rd, ta);
+ }
+ break;
+
+ case MO_32:
+ if (fp_access_check(s)) {
+ TCGv_i32 tn = read_fp_sreg(s, a->rn);
+ TCGv_i32 tm = read_fp_sreg(s, a->rm);
+ TCGv_i32 ta = read_fp_sreg(s, a->ra);
+
+ if (neg_a) {
+ gen_vfp_negs(ta, ta);
+ }
+ if (neg_n) {
+ gen_vfp_negs(tn, tn);
+ }
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_vfp_muladds(ta, tn, tm, ta, fpst);
+ write_fp_sreg(s, a->rd, ta);
+ }
+ break;
+
+ case MO_16:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i32 tn = read_fp_hreg(s, a->rn);
+ TCGv_i32 tm = read_fp_hreg(s, a->rm);
+ TCGv_i32 ta = read_fp_hreg(s, a->ra);
+
+ if (neg_a) {
+ gen_vfp_negh(ta, ta);
+ }
+ if (neg_n) {
+ gen_vfp_negh(tn, tn);
+ }
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst);
+ write_fp_sreg(s, a->rd, ta);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ return true;
+}
+
+TRANS(FMADD, do_fmadd, a, false, false)
+TRANS(FNMADD, do_fmadd, a, true, true)
+TRANS(FMSUB, do_fmadd, a, false, true)
+TRANS(FNMSUB, do_fmadd, a, true, false)
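+
+/*
+ * The flag pairs match the A64 semantics: FMADD computes a + n * m,
+ * FMSUB negates n, FNMADD negates both a and n, FNMSUB negates a only.
+ */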
+
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
* shift amount is in range (ie 0..31 or 0..63) and provide the ARM
@@ -6954,68 +7417,6 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
}
}
-/* Floating point conditional select
- * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
- * +---+---+---+-----------+------+---+------+------+-----+------+------+
- * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
- * +---+---+---+-----------+------+---+------+------+-----+------+------+
- */
-static void disas_fp_csel(DisasContext *s, uint32_t insn)
-{
- unsigned int mos, type, rm, cond, rn, rd;
- TCGv_i64 t_true, t_false;
- DisasCompare64 c;
- MemOp sz;
-
- mos = extract32(insn, 29, 3);
- type = extract32(insn, 22, 2);
- rm = extract32(insn, 16, 5);
- cond = extract32(insn, 12, 4);
- rn = extract32(insn, 5, 5);
- rd = extract32(insn, 0, 5);
-
- if (mos) {
- unallocated_encoding(s);
- return;
- }
-
- switch (type) {
- case 0:
- sz = MO_32;
- break;
- case 1:
- sz = MO_64;
- break;
- case 3:
- sz = MO_16;
- if (dc_isar_feature(aa64_fp16, s)) {
- break;
- }
- /* fallthru */
- default:
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- /* Zero extend sreg & hreg inputs to 64 bits now. */
- t_true = tcg_temp_new_i64();
- t_false = tcg_temp_new_i64();
- read_vec_element(s, t_true, rn, 0, sz);
- read_vec_element(s, t_false, rm, 0, sz);
-
- a64_test_cc(&c, cond);
- tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
- t_true, t_false);
-
- /* Note that sregs & hregs write back zeros to the high bits,
- and we've already done the zero-extension. */
- write_fp_dreg(s, rd, t_true);
-}
-
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
@@ -7369,152 +7770,6 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
}
}
-/* Floating-point data-processing (3 source) - single precision */
-static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
- int rd, int rn, int rm, int ra)
-{
- TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
- TCGv_i32 tcg_res = tcg_temp_new_i32();
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
-
- tcg_op1 = read_fp_sreg(s, rn);
- tcg_op2 = read_fp_sreg(s, rm);
- tcg_op3 = read_fp_sreg(s, ra);
-
- /* These are fused multiply-add, and must be done as one
- * floating point operation with no rounding between the
- * multiplication and addition steps.
- * NB that doing the negations here as separate steps is
- * correct : an input NaN should come out with its sign bit
- * flipped if it is a negated-input.
- */
- if (o1 == true) {
- gen_vfp_negs(tcg_op3, tcg_op3);
- }
-
- if (o0 != o1) {
- gen_vfp_negs(tcg_op1, tcg_op1);
- }
-
- gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
-
- write_fp_sreg(s, rd, tcg_res);
-}
-
-/* Floating-point data-processing (3 source) - double precision */
-static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
- int rd, int rn, int rm, int ra)
-{
- TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
- TCGv_i64 tcg_res = tcg_temp_new_i64();
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
-
- tcg_op1 = read_fp_dreg(s, rn);
- tcg_op2 = read_fp_dreg(s, rm);
- tcg_op3 = read_fp_dreg(s, ra);
-
- /* These are fused multiply-add, and must be done as one
- * floating point operation with no rounding between the
- * multiplication and addition steps.
- * NB that doing the negations here as separate steps is
- * correct : an input NaN should come out with its sign bit
- * flipped if it is a negated-input.
- */
- if (o1 == true) {
- gen_vfp_negd(tcg_op3, tcg_op3);
- }
-
- if (o0 != o1) {
- gen_vfp_negd(tcg_op1, tcg_op1);
- }
-
- gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
-
- write_fp_dreg(s, rd, tcg_res);
-}
-
-/* Floating-point data-processing (3 source) - half precision */
-static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
- int rd, int rn, int rm, int ra)
-{
- TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
- TCGv_i32 tcg_res = tcg_temp_new_i32();
- TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
-
- tcg_op1 = read_fp_hreg(s, rn);
- tcg_op2 = read_fp_hreg(s, rm);
- tcg_op3 = read_fp_hreg(s, ra);
-
- /* These are fused multiply-add, and must be done as one
- * floating point operation with no rounding between the
- * multiplication and addition steps.
- * NB that doing the negations here as separate steps is
- * correct : an input NaN should come out with its sign bit
- * flipped if it is a negated-input.
- */
- if (o1 == true) {
- tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
- }
-
- if (o0 != o1) {
- tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
- }
-
- gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
-
- write_fp_sreg(s, rd, tcg_res);
-}
-
-/* Floating point data-processing (3 source)
- * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
- * +---+---+---+-----------+------+----+------+----+------+------+------+
- * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
- * +---+---+---+-----------+------+----+------+----+------+------+------+
- */
-static void disas_fp_3src(DisasContext *s, uint32_t insn)
-{
- int mos = extract32(insn, 29, 3);
- int type = extract32(insn, 22, 2);
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int ra = extract32(insn, 10, 5);
- int rm = extract32(insn, 16, 5);
- bool o0 = extract32(insn, 15, 1);
- bool o1 = extract32(insn, 21, 1);
-
- if (mos) {
- unallocated_encoding(s);
- return;
- }
-
- switch (type) {
- case 0:
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
- break;
- case 1:
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
- break;
- case 3:
- if (!dc_isar_feature(aa64_fp16, s)) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
- break;
- default:
- unallocated_encoding(s);
- }
-}
-
/* Floating point immediate
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
* +---+---+---+-----------+------+---+------------+-------+------+------+
@@ -7959,8 +8214,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
if (extract32(insn, 24, 1)) {
- /* Floating point data-processing (3 source) */
- disas_fp_3src(s, insn);
+ unallocated_encoding(s); /* in decodetree */
} else if (extract32(insn, 21, 1) == 0) {
/* Floating point to fixed point conversions */
disas_fp_fixed_conv(s, insn);
@@ -7976,7 +8230,7 @@ static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
break;
case 3:
/* Floating point conditional select */
- disas_fp_csel(s, insn);
+ unallocated_encoding(s); /* in decodetree */
break;
case 0:
switch (ctz32(extract32(insn, 12, 4))) {
@@ -9282,219 +9536,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
}
}
-static void handle_3same_64(DisasContext *s, int opcode, bool u,
- TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
-{
- /* Handle 64x64->64 opcodes which are shared between the scalar
- * and vector 3-same groups. We cover every opcode where size == 3
- * is valid in either the three-reg-same (integer, not pairwise)
- * or scalar-three-reg-same groups.
- */
- TCGCond cond;
-
- switch (opcode) {
- case 0x1: /* SQADD */
- if (u) {
- gen_helper_neon_qadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- } else {
- gen_helper_neon_qadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- }
- break;
- case 0x5: /* SQSUB */
- if (u) {
- gen_helper_neon_qsub_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- } else {
- gen_helper_neon_qsub_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- }
- break;
- case 0x6: /* CMGT, CMHI */
- cond = u ? TCG_COND_GTU : TCG_COND_GT;
- do_cmop:
- /* 64 bit integer comparison, result = test ? -1 : 0. */
- tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
- break;
- case 0x7: /* CMGE, CMHS */
- cond = u ? TCG_COND_GEU : TCG_COND_GE;
- goto do_cmop;
- case 0x11: /* CMTST, CMEQ */
- if (u) {
- cond = TCG_COND_EQ;
- goto do_cmop;
- }
- gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
- break;
- case 0x8: /* SSHL, USHL */
- if (u) {
- gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
- } else {
- gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
- }
- break;
- case 0x9: /* SQSHL, UQSHL */
- if (u) {
- gen_helper_neon_qshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- } else {
- gen_helper_neon_qshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- }
- break;
- case 0xa: /* SRSHL, URSHL */
- if (u) {
- gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
- } else {
- gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
- }
- break;
- case 0xb: /* SQRSHL, UQRSHL */
- if (u) {
- gen_helper_neon_qrshl_u64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- } else {
- gen_helper_neon_qrshl_s64(tcg_rd, tcg_env, tcg_rn, tcg_rm);
- }
- break;
- case 0x10: /* ADD, SUB */
- if (u) {
- tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
- } else {
- tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
- }
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-/* AdvSIMD scalar three same
- * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
- * +-----+---+-----------+------+---+------+--------+---+------+------+
- * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
- * +-----+---+-----------+------+---+------+--------+---+------+------+
- */
-static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
-{
- int rd = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- int opcode = extract32(insn, 11, 5);
- int rm = extract32(insn, 16, 5);
- int size = extract32(insn, 22, 2);
- bool u = extract32(insn, 29, 1);
- TCGv_i64 tcg_rd;
-
- switch (opcode) {
- case 0x1: /* SQADD, UQADD */
- case 0x5: /* SQSUB, UQSUB */
- case 0x9: /* SQSHL, UQSHL */
- case 0xb: /* SQRSHL, UQRSHL */
- break;
- case 0x8: /* SSHL, USHL */
- case 0xa: /* SRSHL, URSHL */
- case 0x6: /* CMGT, CMHI */
- case 0x7: /* CMGE, CMHS */
- case 0x11: /* CMTST, CMEQ */
- case 0x10: /* ADD, SUB (vector) */
- if (size != 3) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x16: /* SQDMULH, SQRDMULH (vector) */
- if (size != 1 && size != 2) {
- unallocated_encoding(s);
- return;
- }
- break;
- default:
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- tcg_rd = tcg_temp_new_i64();
-
- if (size == 3) {
- TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
- TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
-
- handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
- } else {
- /* Do a single operation on the lowest element in the vector.
- * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
- * no side effects for all these operations.
- * OPTME: special-purpose helpers would avoid doing some
- * unnecessary work in the helper for the 8 and 16 bit cases.
- */
- NeonGenTwoOpEnvFn *genenvfn;
- TCGv_i32 tcg_rn = tcg_temp_new_i32();
- TCGv_i32 tcg_rm = tcg_temp_new_i32();
- TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
-
- read_vec_element_i32(s, tcg_rn, rn, 0, size);
- read_vec_element_i32(s, tcg_rm, rm, 0, size);
-
- switch (opcode) {
- case 0x1: /* SQADD, UQADD */
- {
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
- { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
- { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- case 0x5: /* SQSUB, UQSUB */
- {
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
- { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
- { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- case 0x9: /* SQSHL, UQSHL */
- {
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
- { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
- { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- case 0xb: /* SQRSHL, UQRSHL */
- {
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
- { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
- { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- case 0x16: /* SQDMULH, SQRDMULH */
- {
- static NeonGenTwoOpEnvFn * const fns[2][2] = {
- { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
- { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
- };
- assert(size == 1 || size == 2);
- genenvfn = fns[size - 1][u];
- break;
- }
- default:
- g_assert_not_reached();
- }
-
- genenvfn(tcg_rd32, tcg_env, tcg_rn, tcg_rm);
- tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
- }
-
- write_fp_dreg(s, rd, tcg_rd);
-}
-
/* AdvSIMD scalar three same extra
* 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
* +-----+---+-----------+------+---+------+---+--------+---+----+----+
@@ -9981,88 +10022,6 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
clear_vec_high(s, is_q, rd);
}

-/* Remaining saturating accumulating ops */
-static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
- bool is_q, int size, int rn, int rd)
-{
- bool is_double = (size == 3);
-
- if (is_double) {
- TCGv_i64 tcg_rn = tcg_temp_new_i64();
- TCGv_i64 tcg_rd = tcg_temp_new_i64();
- int pass;
-
- for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
- read_vec_element(s, tcg_rn, rn, pass, MO_64);
- read_vec_element(s, tcg_rd, rd, pass, MO_64);
-
- if (is_u) { /* USQADD */
- gen_helper_neon_uqadd_s64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- } else { /* SUQADD */
- gen_helper_neon_sqadd_u64(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- }
- write_vec_element(s, tcg_rd, rd, pass, MO_64);
- }
- clear_vec_high(s, !is_scalar, rd);
- } else {
- TCGv_i32 tcg_rn = tcg_temp_new_i32();
- TCGv_i32 tcg_rd = tcg_temp_new_i32();
- int pass, maxpasses;
-
- if (is_scalar) {
- maxpasses = 1;
- } else {
- maxpasses = is_q ? 4 : 2;
- }
-
- for (pass = 0; pass < maxpasses; pass++) {
- if (is_scalar) {
- read_vec_element_i32(s, tcg_rn, rn, pass, size);
- read_vec_element_i32(s, tcg_rd, rd, pass, size);
- } else {
- read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
- read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
- }
-
- if (is_u) { /* USQADD */
- switch (size) {
- case 0:
- gen_helper_neon_uqadd_s8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- break;
- case 1:
- gen_helper_neon_uqadd_s16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- break;
- case 2:
- gen_helper_neon_uqadd_s32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- break;
- default:
- g_assert_not_reached();
- }
- } else { /* SUQADD */
- switch (size) {
- case 0:
- gen_helper_neon_sqadd_u8(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- break;
- case 1:
- gen_helper_neon_sqadd_u16(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- break;
- case 2:
- gen_helper_neon_sqadd_u32(tcg_rd, tcg_env, tcg_rn, tcg_rd);
- break;
- default:
- g_assert_not_reached();
- }
- }
-
- if (is_scalar) {
- write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
- }
- write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
- }
- clear_vec_high(s, is_q, rd);
- }
-}
-
/* AdvSIMD scalar two reg misc
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
* +-----+---+-----------+------+-----------+--------+-----+------+------+
@@ -10082,12 +10041,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
TCGv_ptr tcg_fpstatus;

switch (opcode) {
- case 0x3: /* USQADD / SUQADD*/
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_satacc(s, true, u, false, size, rn, rd);
- return;
case 0x7: /* SQABS / SQNEG */
break;
case 0xa: /* CMLT */
@@ -10187,6 +10140,7 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
break;
default:
+ case 0x3: /* USQADD / SUQADD */
unallocated_encoding(s);
return;
}
@@ -10919,284 +10873,6 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
}
}

-/* Integer op subgroup of C3.6.16. */
-static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
-{
- int is_q = extract32(insn, 30, 1);
- int u = extract32(insn, 29, 1);
- int size = extract32(insn, 22, 2);
- int opcode = extract32(insn, 11, 5);
- int rm = extract32(insn, 16, 5);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
- int pass;
- TCGCond cond;
-
- switch (opcode) {
- case 0x13: /* MUL, PMUL */
- if (u && size != 0) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x0: /* SHADD, UHADD */
- case 0x2: /* SRHADD, URHADD */
- case 0x4: /* SHSUB, UHSUB */
- case 0xc: /* SMAX, UMAX */
- case 0xd: /* SMIN, UMIN */
- case 0xe: /* SABD, UABD */
- case 0xf: /* SABA, UABA */
- case 0x12: /* MLA, MLS */
- if (size == 3) {
- unallocated_encoding(s);
- return;
- }
- break;
- case 0x16: /* SQDMULH, SQRDMULH */
- if (size == 0 || size == 3) {
- unallocated_encoding(s);
- return;
- }
- break;
- default:
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- switch (opcode) {
- case 0x01: /* SQADD, UQADD */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
- }
- return;
- case 0x05: /* SQSUB, UQSUB */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
- }
- return;
- case 0x08: /* SSHL, USHL */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
- }
- return;
- case 0x0c: /* SMAX, UMAX */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
- }
- return;
- case 0x0d: /* SMIN, UMIN */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
- }
- return;
- case 0xe: /* SABD, UABD */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
- }
- return;
- case 0xf: /* SABA, UABA */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
- }
- return;
- case 0x10: /* ADD, SUB */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
- }
- return;
- case 0x13: /* MUL, PMUL */
- if (!u) { /* MUL */
- gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
- } else { /* PMUL */
- gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
- }
- return;
- case 0x12: /* MLA, MLS */
- if (u) {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
- } else {
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
- }
- return;
- case 0x16: /* SQDMULH, SQRDMULH */
- {
- static gen_helper_gvec_3_ptr * const fns[2][2] = {
- { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
- { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
- };
- gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
- }
- return;
- case 0x11:
- if (!u) { /* CMTST */
- gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
- return;
- }
- /* else CMEQ */
- cond = TCG_COND_EQ;
- goto do_gvec_cmp;
- case 0x06: /* CMGT, CMHI */
- cond = u ? TCG_COND_GTU : TCG_COND_GT;
- goto do_gvec_cmp;
- case 0x07: /* CMGE, CMHS */
- cond = u ? TCG_COND_GEU : TCG_COND_GE;
- do_gvec_cmp:
- tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm),
- is_q ? 16 : 8, vec_full_reg_size(s));
- return;
- }
-
- if (size == 3) {
- assert(is_q);
- for (pass = 0; pass < 2; pass++) {
- TCGv_i64 tcg_op1 = tcg_temp_new_i64();
- TCGv_i64 tcg_op2 = tcg_temp_new_i64();
- TCGv_i64 tcg_res = tcg_temp_new_i64();
-
- read_vec_element(s, tcg_op1, rn, pass, MO_64);
- read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
- handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
-
- write_vec_element(s, tcg_res, rd, pass, MO_64);
- }
- } else {
- for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
- TCGv_i32 tcg_op1 = tcg_temp_new_i32();
- TCGv_i32 tcg_op2 = tcg_temp_new_i32();
- TCGv_i32 tcg_res = tcg_temp_new_i32();
- NeonGenTwoOpFn *genfn = NULL;
- NeonGenTwoOpEnvFn *genenvfn = NULL;
-
- read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
- read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
-
- switch (opcode) {
- case 0x0: /* SHADD, UHADD */
- {
- static NeonGenTwoOpFn * const fns[3][2] = {
- { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
- { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
- { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
- };
- genfn = fns[size][u];
- break;
- }
- case 0x2: /* SRHADD, URHADD */
- {
- static NeonGenTwoOpFn * const fns[3][2] = {
- { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
- { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
- { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
- };
- genfn = fns[size][u];
- break;
- }
- case 0x4: /* SHSUB, UHSUB */
- {
- static NeonGenTwoOpFn * const fns[3][2] = {
- { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
- { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
- { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
- };
- genfn = fns[size][u];
- break;
- }
- case 0x9: /* SQSHL, UQSHL */
- {
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
- { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
- { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- case 0xa: /* SRSHL, URSHL */
- {
- static NeonGenTwoOpFn * const fns[3][2] = {
- { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
- { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
- { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
- };
- genfn = fns[size][u];
- break;
- }
- case 0xb: /* SQRSHL, UQRSHL */
- {
- static NeonGenTwoOpEnvFn * const fns[3][2] = {
- { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
- { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
- { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- default:
- g_assert_not_reached();
- }
-
- if (genenvfn) {
- genenvfn(tcg_res, tcg_env, tcg_op1, tcg_op2);
- } else {
- genfn(tcg_res, tcg_op1, tcg_op2);
- }
-
- write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
- }
- }
- clear_vec_high(s, is_q, rd);
-}
-
-/* AdvSIMD three same
- * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
- * +---+---+---+-----------+------+---+------+--------+---+------+------+
- * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
- * +---+---+---+-----------+------+---+------+--------+---+------+------+
- */
-static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
-{
- int opcode = extract32(insn, 11, 5);
-
- switch (opcode) {
- default:
- disas_simd_3same_int(s, insn);
- break;
- case 0x3: /* logic ops */
- case 0x14: /* SMAXP, UMAXP */
- case 0x15: /* SMINP, UMINP */
- case 0x17: /* ADDP */
- case 0x18 ... 0x31: /* floating point ops */
- unallocated_encoding(s);
- break;
- }
-}
-
/* AdvSIMD three same extra
* 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
* +---+---+---+-----------+------+---+------+---+--------+---+----+----+
@@ -11683,16 +11359,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
break;
- case 0x3: /* SUQADD, USQADD */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- if (!fp_access_check(s)) {
- return;
- }
- handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
- return;
case 0x7: /* SQABS, SQNEG */
if (size == 3 && !is_q) {
unallocated_encoding(s);
@@ -11867,6 +11533,7 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
break;
}
default:
+ case 0x3: /* SUQADD, USQADD */
unallocated_encoding(s);
return;
}
@@ -12375,14 +12042,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
TCGv_ptr fpst;

switch (16 * u + opcode) {
- case 0x08: /* MUL */
- case 0x10: /* MLA */
- case 0x14: /* MLS */
- if (is_scalar) {
- unallocated_encoding(s);
- return;
- }
- break;
case 0x02: /* SMLAL, SMLAL2 */
case 0x12: /* UMLAL, UMLAL2 */
case 0x06: /* SMLSL, SMLSL2 */
@@ -12400,9 +12059,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
case 0x0b: /* SQDMULL, SQDMULL2 */
is_long = true;
break;
- case 0x0c: /* SQDMULH */
- case 0x0d: /* SQRDMULH */
- break;
case 0x1d: /* SQRDMLAH */
case 0x1f: /* SQRDMLSH */
if (!dc_isar_feature(aa64_rdm, s)) {
@@ -12462,7 +12118,12 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
case 0x01: /* FMLA */
case 0x04: /* FMLSL */
case 0x05: /* FMLS */
+ case 0x08: /* MUL */
case 0x09: /* FMUL */
+ case 0x0c: /* SQDMULH */
+ case 0x0d: /* SQRDMULH */
+ case 0x10: /* MLA */
+ case 0x14: /* MLS */
case 0x18: /* FMLAL2 */
case 0x19: /* FMULX */
case 0x1c: /* FMLSL2 */
@@ -12583,56 +12244,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
: gen_helper_gvec_fcmlah_idx);
}
return;
-
- case 0x08: /* MUL */
- if (!is_long && !is_scalar) {
- static gen_helper_gvec_3 * const fns[3] = {
- gen_helper_gvec_mul_idx_h,
- gen_helper_gvec_mul_idx_s,
- gen_helper_gvec_mul_idx_d,
- };
- tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm),
- is_q ? 16 : 8, vec_full_reg_size(s),
- index, fns[size - 1]);
- return;
- }
- break;
-
- case 0x10: /* MLA */
- if (!is_long && !is_scalar) {
- static gen_helper_gvec_4 * const fns[3] = {
- gen_helper_gvec_mla_idx_h,
- gen_helper_gvec_mla_idx_s,
- gen_helper_gvec_mla_idx_d,
- };
- tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm),
- vec_full_reg_offset(s, rd),
- is_q ? 16 : 8, vec_full_reg_size(s),
- index, fns[size - 1]);
- return;
- }
- break;
-
- case 0x14: /* MLS */
- if (!is_long && !is_scalar) {
- static gen_helper_gvec_4 * const fns[3] = {
- gen_helper_gvec_mls_idx_h,
- gen_helper_gvec_mls_idx_s,
- gen_helper_gvec_mls_idx_d,
- };
- tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
- vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm),
- vec_full_reg_offset(s, rd),
- is_q ? 16 : 8, vec_full_reg_size(s),
- index, fns[size - 1]);
- return;
- }
- break;
}

if (size == 3) {
@@ -12668,7 +12279,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

switch (16 * u + opcode) {
- case 0x08: /* MUL */
case 0x10: /* MLA */
case 0x14: /* MLS */
{
@@ -12917,7 +12527,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
*/
static const AArch64DecodeTable data_proc_simd[] = {
/* pattern , mask , fn */
- { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
{ 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
{ 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
{ 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
@@ -12929,7 +12538,6 @@ static const AArch64DecodeTable data_proc_simd[] = {
{ 0x0e000000, 0xbf208c00, disas_simd_tb },
{ 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
{ 0x2e000000, 0xbf208400, disas_simd_ext },
- { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
{ 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
{ 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
{ 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
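
The legacy decoders removed above all share one shape: validate size/opcode, then select a Neon helper from a table indexed by element size and the U (signedness) bit, as in the fns[size][u] lookups in disas_simd_scalar_three_reg_same and disas_simd_3same_int. A minimal standalone model of that dispatch pattern (illustrative only; the hadd_* names here are hypothetical stand-ins, not QEMU helpers):

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t (*two_op_fn)(int32_t a, int32_t b);

    /* Halving adds per element size; promotion to int keeps the carry bit. */
    static int32_t hadd_s8(int32_t a, int32_t b)  { return ((int8_t)a + (int8_t)b) >> 1; }
    static int32_t hadd_u8(int32_t a, int32_t b)  { return ((uint8_t)a + (uint8_t)b) >> 1; }
    static int32_t hadd_s16(int32_t a, int32_t b) { return ((int16_t)a + (int16_t)b) >> 1; }
    static int32_t hadd_u16(int32_t a, int32_t b) { return ((uint16_t)a + (uint16_t)b) >> 1; }

    int main(void)
    {
        /* Row = size (0: bytes, 1: halfwords), column = U bit. */
        static two_op_fn const fns[2][2] = {
            { hadd_s8,  hadd_u8  },
            { hadd_s16, hadd_u16 },
        };
        int size = 0, u = 1;                        /* UHADD on byte elements */
        printf("%d\n", fns[size][u](200, 100));     /* (200 + 100) >> 1 = 150 */
        return 0;
    }

The decodetree conversions in this merge replace that switch-plus-table plumbing with per-insn trans_* functions that call gvec expanders directly, as the translate-neon.c and translate.h hunks below show.
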
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index 91750f0ca9..0fcf7cb63a 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -198,6 +198,20 @@ void gen_gvec_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
void gen_gvec_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
uint32_t a, uint32_t oprsz, uint32_t maxsz);

+void gen_suqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz);
+void gen_suqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b);
+void gen_gvec_suqadd_qc(unsigned vece, uint32_t rd_ofs,
+ uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
+void gen_usqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz);
+void gen_usqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b);
+void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs,
+ uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
index 18b048611b..915c9e56db 100644
--- a/target/arm/tcg/translate-neon.c
+++ b/target/arm/tcg/translate-neon.c
@@ -794,6 +794,12 @@ DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
+DO_3SAME(VRSHL_S, gen_gvec_srshl)
+DO_3SAME(VRSHL_U, gen_gvec_urshl)
+DO_3SAME(VQSHL_S, gen_neon_sqshl)
+DO_3SAME(VQSHL_U, gen_neon_uqshl)
+DO_3SAME(VQRSHL_S, gen_neon_sqrshl)
+DO_3SAME(VQRSHL_U, gen_neon_uqrshl)

/* These insns are all gvec_bitsel but with the inputs in various orders. */
#define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
@@ -835,6 +841,12 @@ DO_3SAME_NO_SZ_3(VPMAX_S, gen_gvec_smaxp)
DO_3SAME_NO_SZ_3(VPMIN_S, gen_gvec_sminp)
DO_3SAME_NO_SZ_3(VPMAX_U, gen_gvec_umaxp)
DO_3SAME_NO_SZ_3(VPMIN_U, gen_gvec_uminp)
+DO_3SAME_NO_SZ_3(VHADD_S, gen_gvec_shadd)
+DO_3SAME_NO_SZ_3(VHADD_U, gen_gvec_uhadd)
+DO_3SAME_NO_SZ_3(VHSUB_S, gen_gvec_shsub)
+DO_3SAME_NO_SZ_3(VHSUB_U, gen_gvec_uhsub)
+DO_3SAME_NO_SZ_3(VRHADD_S, gen_gvec_srhadd)
+DO_3SAME_NO_SZ_3(VRHADD_U, gen_gvec_urhadd)

#define DO_3SAME_CMP(INSN, COND) \
static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
@@ -912,51 +924,6 @@ DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)

-#define DO_3SAME_64(INSN, FUNC) \
- static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
- uint32_t rn_ofs, uint32_t rm_ofs, \
- uint32_t oprsz, uint32_t maxsz) \
- { \
- static const GVecGen3 op = { .fni8 = FUNC }; \
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &op); \
- } \
- DO_3SAME(INSN, gen_##INSN##_3s)
-
-#define DO_3SAME_64_ENV(INSN, FUNC) \
- static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) \
- { \
- FUNC(d, tcg_env, n, m); \
- } \
- DO_3SAME_64(INSN, gen_##INSN##_elt)
-
-DO_3SAME_64(VRSHL_S64, gen_helper_neon_rshl_s64)
-DO_3SAME_64(VRSHL_U64, gen_helper_neon_rshl_u64)
-DO_3SAME_64_ENV(VQSHL_S64, gen_helper_neon_qshl_s64)
-DO_3SAME_64_ENV(VQSHL_U64, gen_helper_neon_qshl_u64)
-DO_3SAME_64_ENV(VQRSHL_S64, gen_helper_neon_qrshl_s64)
-DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
-
-#define DO_3SAME_32(INSN, FUNC) \
- static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
- uint32_t rn_ofs, uint32_t rm_ofs, \
- uint32_t oprsz, uint32_t maxsz) \
- { \
- static const GVecGen3 ops[4] = { \
- { .fni4 = gen_helper_neon_##FUNC##8 }, \
- { .fni4 = gen_helper_neon_##FUNC##16 }, \
- { .fni4 = gen_helper_neon_##FUNC##32 }, \
- { 0 }, \
- }; \
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
- } \
- static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
- { \
- if (a->size > 2) { \
- return false; \
- } \
- return do_3same(s, a, gen_##INSN##_3s); \
- }
-
/*
* Some helper functions need to be passed the tcg_env. In order
* to use those with the gvec APIs like tcg_gen_gvec_3() we need
@@ -969,67 +936,12 @@ DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
FUNC(d, tcg_env, n, m); \
}

-#define DO_3SAME_32_ENV(INSN, FUNC) \
- WRAP_ENV_FN(gen_##INSN##_tramp8, gen_helper_neon_##FUNC##8); \
- WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##16); \
- WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##32); \
- static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
- uint32_t rn_ofs, uint32_t rm_ofs, \
- uint32_t oprsz, uint32_t maxsz) \
- { \
- static const GVecGen3 ops[4] = { \
- { .fni4 = gen_##INSN##_tramp8 }, \
- { .fni4 = gen_##INSN##_tramp16 }, \
- { .fni4 = gen_##INSN##_tramp32 }, \
- { 0 }, \
- }; \
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
- } \
- static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
- { \
- if (a->size > 2) { \
- return false; \
- } \
- return do_3same(s, a, gen_##INSN##_3s); \
- }
-
-DO_3SAME_32(VHADD_S, hadd_s)
-DO_3SAME_32(VHADD_U, hadd_u)
-DO_3SAME_32(VHSUB_S, hsub_s)
-DO_3SAME_32(VHSUB_U, hsub_u)
-DO_3SAME_32(VRHADD_S, rhadd_s)
-DO_3SAME_32(VRHADD_U, rhadd_u)
-DO_3SAME_32(VRSHL_S, rshl_s)
-DO_3SAME_32(VRSHL_U, rshl_u)
-
-DO_3SAME_32_ENV(VQSHL_S, qshl_s)
-DO_3SAME_32_ENV(VQSHL_U, qshl_u)
-DO_3SAME_32_ENV(VQRSHL_S, qrshl_s)
-DO_3SAME_32_ENV(VQRSHL_U, qrshl_u)
-
#define DO_3SAME_VQDMULH(INSN, FUNC) \
- WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##_s16); \
- WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##_s32); \
- static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
- uint32_t rn_ofs, uint32_t rm_ofs, \
- uint32_t oprsz, uint32_t maxsz) \
- { \
- static const GVecGen3 ops[2] = { \
- { .fni4 = gen_##INSN##_tramp16 }, \
- { .fni4 = gen_##INSN##_tramp32 }, \
- }; \
- tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece - 1]); \
- } \
static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
- { \
- if (a->size != 1 && a->size != 2) { \
- return false; \
- } \
- return do_3same(s, a, gen_##INSN##_3s); \
- }
+ { return a->size >= 1 && a->size <= 2 && do_3same(s, a, FUNC); }
-DO_3SAME_VQDMULH(VQDMULH, qdmulh)
-DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)
+DO_3SAME_VQDMULH(VQDMULH, gen_gvec_sqdmulh_qc)
+DO_3SAME_VQDMULH(VQRDMULH, gen_gvec_sqrdmulh_qc)
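
For reference, the slimmed-down macro above now expands DO_3SAME_VQDMULH(VQDMULH, gen_gvec_sqdmulh_qc) to a single trans function; this is exactly what the preprocessor emits from the lines above, reformatted for readability:

    static bool trans_VQDMULH_3s(DisasContext *s, arg_3same *a)
    {
        return a->size >= 1 && a->size <= 2 && do_3same(s, a, gen_gvec_sqdmulh_qc);
    }

The per-size GVecGen3 trampoline tables are gone; size checking and expansion now live behind gen_gvec_sqdmulh_qc / gen_gvec_sqrdmulh_qc.
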
#define WRAP_FP_GVEC(WRAPNAME, FPST, FUNC) \
static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 3abdbedfe5..aba21f730f 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -459,6 +459,31 @@ void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_sqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_uqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_shsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uhsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_srhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
@@ -466,12 +491,27 @@ void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

+void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz);
+void gen_uqadd_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_sqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz);
+void gen_sqadd_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_uqsub_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz);
+void gen_uqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_sqsub_bhs(TCGv_i64 res, TCGv_i64 qc,
+ TCGv_i64 a, TCGv_i64 b, MemOp esz);
+void gen_sqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

@@ -499,6 +539,10 @@ void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
int64_t shift, uint32_t opr_sz, uint32_t max_sz);

+void gen_gvec_sqdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sqrdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 56fea14edb..b05922b425 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -311,6 +311,38 @@ void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}

+void HELPER(neon_sqdmulh_idx_h)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, false, vq);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(neon_sqrdmulh_idx_h)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, true, vq);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
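
Two things are worth noting in these new indexed helpers: the outer loop steps through the vector in 16-byte segments and re-reads the indexed element once per segment (mm = m[i]), so on longer SVE-style vectors the multiplicand is replicated per 128-bit segment; and the per-element arithmetic is the usual saturating doubling multiply high. A scalar model of the latter for 16-bit elements (an illustrative sketch, not the do_sqrdmlah_h implementation itself):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* SQDMULH (round == false) / SQRDMULH (round == true) on one element. */
    static int16_t sqrdmulh_h(int16_t a, int16_t b, bool round, bool *qc)
    {
        int64_t p = 2 * (int64_t)a * b;     /* doubling product */
        if (round) {
            p += 1 << 15;                   /* round to nearest */
        }
        p >>= 16;                           /* keep the high half */
        if (p != (int16_t)p) {              /* only INT16_MIN * INT16_MIN overflows */
            p = INT16_MAX;
            *qc = true;                     /* sticky saturation flag (FPSR.QC) */
        }
        return p;
    }

    int main(void)
    {
        bool qc = false;
        printf("%d\n", sqrdmulh_h(0x4000, 0x4000, false, &qc));            /* 8192 */
        printf("%d %d\n", sqrdmulh_h(INT16_MIN, INT16_MIN, false, &qc),
               qc);                                                        /* 32767 1 */
        return 0;
    }
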
void HELPER(sve2_sqrdmlah_h)(void *vd, void *vn, void *vm,
void *va, uint32_t desc)
{
@@ -474,6 +506,38 @@ void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}

+void HELPER(neon_sqdmulh_idx_s)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, false, vq);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(neon_sqrdmulh_idx_s)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, true, vq);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
void HELPER(sve2_sqrdmlah_s)(void *vd, void *vn, void *vm,
void *va, uint32_t desc)
{
@@ -1555,6 +1619,14 @@ DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)

+DO_SAT(gvec_usqadd_b, int, uint8_t, int8_t, +, 0, UINT8_MAX)
+DO_SAT(gvec_usqadd_h, int, uint16_t, int16_t, +, 0, UINT16_MAX)
+DO_SAT(gvec_usqadd_s, int64_t, uint32_t, int32_t, +, 0, UINT32_MAX)
+
+DO_SAT(gvec_suqadd_b, int, int8_t, uint8_t, +, INT8_MIN, INT8_MAX)
+DO_SAT(gvec_suqadd_h, int, int16_t, uint16_t, +, INT16_MIN, INT16_MAX)
+DO_SAT(gvec_suqadd_s, int64_t, int32_t, uint32_t, +, INT32_MIN, INT32_MAX)
+
#undef DO_SAT

void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
@@ -1645,6 +1717,62 @@ void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
clear_tail(d, oprsz, simd_maxsz(desc));
}

+void HELPER(gvec_usqadd_d)(void *vd, void *vq, void *vn,
+ void *vm, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ bool q = false;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ uint64_t nn = n[i];
+ int64_t mm = m[i];
+ uint64_t dd = nn + mm;
+
+ if (mm < 0) {
+ if (nn < (uint64_t)-mm) {
+ dd = 0;
+ q = true;
+ }
+ } else {
+ if (dd < nn) {
+ dd = UINT64_MAX;
+ q = true;
+ }
+ }
+ d[i] = dd;
+ }
+ if (q) {
+ uint32_t *qc = vq;
+ qc[0] = 1;
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_suqadd_d)(void *vd, void *vq, void *vn,
+ void *vm, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ bool q = false;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ int64_t nn = n[i];
+ uint64_t mm = m[i];
+ int64_t dd = nn + mm;
+
+ if (mm > (uint64_t)(INT64_MAX - nn)) {
+ dd = INT64_MAX;
+ q = true;
+ }
+ d[i] = dd;
+ }
+ if (q) {
+ uint32_t *qc = vq;
+ qc[0] = 1;
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
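
The scalar rule these two helpers implement: USQADD adds a signed value to an unsigned accumulator and clamps to [0, max]; SUQADD adds an unsigned value to a signed accumulator and can only saturate upward. A self-contained model of the 8-bit USQADD case (illustrative only; compare the DO_SAT(gvec_usqadd_b, ...) expansion above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t usqadd8(uint8_t nn, int8_t mm, bool *qc)
    {
        int32_t r = nn + mm;        /* exact in widened arithmetic */
        if (r < 0) {
            *qc = true;
            return 0;               /* signed addend outweighs the accumulator */
        }
        if (r > UINT8_MAX) {
            *qc = true;
            return UINT8_MAX;       /* unsigned overflow */
        }
        return r;
    }

    int main(void)
    {
        bool qc = false;
        printf("%u\n", usqadd8(10, -20, &qc));          /* 0, and qc is set */
        printf("%u %d\n", usqadd8(250, 10, &qc), qc);   /* 255 1 */
        return 0;
    }
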
#define DO_SRA(NAME, TYPE) \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index effc2c1c98..85957943bf 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -39,7 +39,7 @@ QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS);
*/
void x86_cpu_do_interrupt(CPUState *cpu);
#ifndef CONFIG_USER_ONLY
-void x86_cpu_exec_halt(CPUState *cpu);
+bool x86_cpu_exec_halt(CPUState *cpu);
bool x86_need_replay_interrupt(int interrupt_request);
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif
diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
index 2db8083748..9ba94deb3a 100644
--- a/target/i386/tcg/sysemu/seg_helper.c
+++ b/target/i386/tcg/sysemu/seg_helper.c
@@ -128,7 +128,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
}
}

-void x86_cpu_exec_halt(CPUState *cpu)
+bool x86_cpu_exec_halt(CPUState *cpu)
{
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -138,6 +138,7 @@ void x86_cpu_exec_halt(CPUState *cpu)
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
bql_unlock();
}
+ return cpu_has_work(cpu);
}
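
With this change the hook reports whether the CPU now has work, which the generic exec loop can use to decide whether to leave the halted state. A toy model of that contract (an assumption-level sketch of the caller side, not QEMU's actual cpu-exec.c code):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_cpu { bool halted; bool irq_pending; };

    /* Per-target hook, mirroring the new bool return: true == has work. */
    static bool toy_exec_halt(struct toy_cpu *cpu)
    {
        return cpu->irq_pending;
    }

    static bool toy_handle_halt(struct toy_cpu *cpu)
    {
        if (cpu->halted) {
            if (!toy_exec_halt(cpu)) {
                return true;            /* nothing to run: stay halted */
            }
            cpu->halted = false;        /* wake up and resume execution */
        }
        return false;
    }

    int main(void)
    {
        struct toy_cpu cpu = { .halted = true, .irq_pending = false };
        printf("%d\n", toy_handle_halt(&cpu));                  /* 1 */
        cpu.irq_pending = true;
        printf("%d %d\n", toy_handle_halt(&cpu), cpu.halted);   /* 0 0 */
        return 0;
    }
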
bool x86_need_replay_interrupt(int interrupt_request)
diff --git a/tests/avocado/machine_aarch64_sbsaref.py b/tests/avocado/machine_aarch64_sbsaref.py
index 98c76c1ff7..6bb82f2a03 100644
--- a/tests/avocado/machine_aarch64_sbsaref.py
+++ b/tests/avocado/machine_aarch64_sbsaref.py
@@ -37,18 +37,18 @@ class Aarch64SbsarefMachine(QemuSystemTest):

Used components:

- - Trusted Firmware 2.10.2
- - Tianocore EDK2 stable202402
- - Tianocore EDK2-platforms commit 085c2fb
+ - Trusted Firmware 2.11.0
+ - Tianocore EDK2 stable202405
+ - Tianocore EDK2-platforms commit 4bbd0ed

"""
# Secure BootRom (TF-A code)
fs0_xz_url = (
"https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/"
- "20240313-116475/edk2/SBSA_FLASH0.fd.xz"
+ "20240528-140808/edk2/SBSA_FLASH0.fd.xz"
)
- fs0_xz_hash = "637593749cc307dea7dc13265c32e5d020267552f22b18a31850b8429fc5e159"
+ fs0_xz_hash = "fa6004900b67172914c908b78557fec4d36a5f784f4c3dd08f49adb75e1892a9"
tar_xz_path = self.fetch_asset(fs0_xz_url, asset_hash=fs0_xz_hash,
algorithm='sha256')
archive.extract(tar_xz_path, self.workdir)
@@ -57,9 +57,9 @@ class Aarch64SbsarefMachine(QemuSystemTest):
# Non-secure rom (UEFI and EFI variables)
fs1_xz_url = (
"https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/"
- "20240313-116475/edk2/SBSA_FLASH1.fd.xz"
+ "20240528-140808/edk2/SBSA_FLASH1.fd.xz"
)
- fs1_xz_hash = "cb0a5e8cf5e303c5d3dc106cfd5943ffe9714b86afddee7164c69ee1dd41991c"
+ fs1_xz_hash = "5f3747d4000bc416d9641e33ff4ac60c3cc8cb74ca51b6e932e58531c62eb6f7"
tar_xz_path = self.fetch_asset(fs1_xz_url, asset_hash=fs1_xz_hash,
algorithm='sha256')
archive.extract(tar_xz_path, self.workdir)
@@ -98,15 +98,15 @@ class Aarch64SbsarefMachine(QemuSystemTest):

# AP Trusted ROM
wait_for_console_pattern(self, "Booting Trusted Firmware")
- wait_for_console_pattern(self, "BL1: v2.10.2(release):")
+ wait_for_console_pattern(self, "BL1: v2.11.0(release):")
wait_for_console_pattern(self, "BL1: Booting BL2")
# Trusted Boot Firmware
- wait_for_console_pattern(self, "BL2: v2.10.2(release)")
+ wait_for_console_pattern(self, "BL2: v2.11.0(release)")
wait_for_console_pattern(self, "Booting BL31")
# EL3 Runtime Software
- wait_for_console_pattern(self, "BL31: v2.10.2(release)")
+ wait_for_console_pattern(self, "BL31: v2.11.0(release)")

# Non-trusted Firmware
wait_for_console_pattern(self, "UEFI firmware (version 1.0")