From 71f5027f34111da7bb769e8d5872294d728a7025 Mon Sep 17 00:00:00 2001 From: Patrick Venture Date: Tue, 15 Jun 2021 12:28:47 -0700 Subject: docs/system/arm: Add quanta-q71l-bmc reference MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a line-item reference to the supported quanta-q71l-bmc aspeed entry. Signed-off-by: Patrick Venture Reviewed-by: Cédric Le Goater Message-id: 20210615192848.1065297-2-venture@google.com Signed-off-by: Peter Maydell --- docs/system/arm/aspeed.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst index 57ee2bd94f..cec87e3743 100644 --- a/docs/system/arm/aspeed.rst +++ b/docs/system/arm/aspeed.rst @@ -13,6 +13,7 @@ etc. AST2400 SoC based machines : - ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC +- ``quanta-q71l-bmc`` OpenBMC Quanta BMC AST2500 SoC based machines : -- cgit v1.2.3 From fd17995c07ec1bc33f71b4ccb6c1b8721b6b368c Mon Sep 17 00:00:00 2001 From: Patrick Venture Date: Tue, 15 Jun 2021 12:28:48 -0700 Subject: docs/system/arm: Add quanta-gbs-bmc reference MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add line item reference to quanta-gbs-bmc machine. Signed-off-by: Patrick Venture Reviewed-by: Cédric Le Goater Message-id: 20210615192848.1065297-3-venture@google.com [PMM: fixed underline Sphinx warning] Signed-off-by: Peter Maydell --- docs/system/arm/nuvoton.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst index ca011bd479..3cd2b2b18d 100644 --- a/docs/system/arm/nuvoton.rst +++ b/docs/system/arm/nuvoton.rst @@ -1,5 +1,5 @@ -Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``) -===================================================== +Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``) +================================================================ The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are designed to be used as Baseboard Management Controllers (BMCs) in various @@ -18,6 +18,7 @@ segment. The following machines are based on this chip : The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and Hyperscale applications. The following machines are based on this chip : +- ``quanta-gbs-bmc`` Quanta GBS server BMC - ``quanta-gsj`` Quanta GSJ server BMC There are also two more SoCs, NPCM710 and NPCM705, which are single-core -- cgit v1.2.3 From 38f2cfbbc3f2958cba542b1e264a8027eeca4835 Mon Sep 17 00:00:00 2001 From: Nolan Leake Date: Fri, 25 Jun 2021 14:02:09 -0700 Subject: hw/arm: Add basic power management to raspi. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is just enough to make reboot and poweroff work. Works for linux, u-boot, and the arm trusted firmware. Not tested, but should work for plan9, and bare-metal/hobby OSes, since they seem to generally do what linux does for reset. The watchdog timer functionality is not yet implemented.
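For reference, the register protocol this model implements is the same one the Linux bcm2835 watchdog driver uses for restart and poweroff. A minimal bare-metal sketch of a guest driving it (the 0x3f100000 base is an assumption for raspi2, consistent with the /proc/iomem listing later in this series; the offsets and magic values come from the model below, and the helper names are illustrative):

    #include <stdint.h>

    #define PM_BASE          0x3f100000u  /* assumed raspi2 physical base */
    #define PM_RSTC          (PM_BASE + 0x1c)
    #define PM_RSTS          (PM_BASE + 0x20)
    #define PM_PASSWORD      0x5a000000u  /* writes without it are ignored */
    #define PM_RSTC_RESET    0x20u
    #define PM_RSTS_POWEROFF 0x555u       /* "partition 63" halt marker */

    static inline void mmio_write(uintptr_t a, uint32_t v)
    {
        *(volatile uint32_t *)a = v;
    }

    static inline uint32_t mmio_read(uintptr_t a)
    {
        return *(volatile uint32_t *)a;
    }

    void pm_reboot(void)
    {
        mmio_write(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
    }

    void pm_poweroff(void)
    {
        /* Set the halt marker in RSTS first; the model then treats the
         * following reset request as a shutdown instead. */
        uint32_t rsts = mmio_read(PM_RSTS) & ~0xfffu;
        mmio_write(PM_RSTS, PM_PASSWORD | rsts | PM_RSTS_POWEROFF);
        mmio_write(PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
    }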
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/64 Signed-off-by: Nolan Leake Reviewed-by: Philippe Mathieu-Daudé Tested-by: Philippe Mathieu-Daudé Message-id: 20210625210209.1870217-1-nolan@sigbus.net [PMM: tweaked commit title; fixed region size to 0x200; moved header file to include/] Signed-off-by: Peter Maydell --- hw/arm/bcm2835_peripherals.c | 13 ++- hw/misc/bcm2835_powermgt.c | 160 +++++++++++++++++++++++++++++++++++ hw/misc/meson.build | 1 + include/hw/arm/bcm2835_peripherals.h | 3 +- include/hw/misc/bcm2835_powermgt.h | 29 +++++++ 5 files changed, 204 insertions(+), 2 deletions(-) create mode 100644 hw/misc/bcm2835_powermgt.c create mode 100644 include/hw/misc/bcm2835_powermgt.h diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index dcff13433e..48538c9360 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -126,6 +126,10 @@ static void bcm2835_peripherals_init(Object *obj) object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr", OBJECT(&s->gpu_bus_mr)); + + /* Power Management */ + object_initialize_child(obj, "powermgt", &s->powermgt, + TYPE_BCM2835_POWERMGT); } static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) @@ -364,9 +368,16 @@ static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, INTERRUPT_USB)); + /* Power Management */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, PM_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0)); + create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000); create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40); - create_unimp(s, &s->powermgt, "bcm2835-powermgt", PM_OFFSET, 0x114); create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100); create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100); create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20); diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c new file mode 100644 index 0000000000..25fa804cbd --- /dev/null +++ b/hw/misc/bcm2835_powermgt.c @@ -0,0 +1,160 @@ +/* + * BCM2835 Power Management emulation + * + * Copyright (C) 2017 Marcin Chojnacki + * Copyright (C) 2021 Nolan Leake + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/bcm2835_powermgt.h" +#include "migration/vmstate.h" +#include "sysemu/runstate.h" + +#define PASSWORD 0x5a000000 +#define PASSWORD_MASK 0xff000000 + +#define R_RSTC 0x1c +#define V_RSTC_RESET 0x20 +#define R_RSTS 0x20 +#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. 
*/ +#define R_WDOG 0x24 + +static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque; + uint32_t res = 0; + + switch (offset) { + case R_RSTC: + res = s->rstc; + break; + case R_RSTS: + res = s->rsts; + break; + case R_WDOG: + res = s->wdog; + break; + + default: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx + "\n", offset); + res = 0; + break; + } + + return res; +} + +static void bcm2835_powermgt_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque; + + if ((value & PASSWORD_MASK) != PASSWORD) { + qemu_log_mask(LOG_GUEST_ERROR, + "bcm2835_powermgt_write: Bad password 0x%"PRIx64 + " at offset 0x%08"HWADDR_PRIx"\n", + value, offset); + return; + } + + value = value & ~PASSWORD_MASK; + + switch (offset) { + case R_RSTC: + s->rstc = value; + if (value & V_RSTC_RESET) { + if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } else { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + } + break; + case R_RSTS: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_write: RSTS\n"); + s->rsts = value; + break; + case R_WDOG: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_write: WDOG\n"); + s->wdog = value; + break; + + default: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx + "\n", offset); + break; + } +} + +static const MemoryRegionOps bcm2835_powermgt_ops = { + .read = bcm2835_powermgt_read, + .write = bcm2835_powermgt_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static const VMStateDescription vmstate_bcm2835_powermgt = { + .name = TYPE_BCM2835_POWERMGT, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(rstc, BCM2835PowerMgtState), + VMSTATE_UINT32(rsts, BCM2835PowerMgtState), + VMSTATE_UINT32(wdog, BCM2835PowerMgtState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_powermgt_init(Object *obj) +{ + BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s, + TYPE_BCM2835_POWERMGT, 0x200); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static void bcm2835_powermgt_reset(DeviceState *dev) +{ + BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev); + + /* https://elinux.org/BCM2835_registers#PM */ + s->rstc = 0x00000102; + s->rsts = 0x00001000; + s->wdog = 0x00000000; +} + +static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_powermgt_reset; + dc->vmsd = &vmstate_bcm2835_powermgt; +} + +static TypeInfo bcm2835_powermgt_info = { + .name = TYPE_BCM2835_POWERMGT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835PowerMgtState), + .class_init = bcm2835_powermgt_class_init, + .instance_init = bcm2835_powermgt_init, +}; + +static void bcm2835_powermgt_register_types(void) +{ + type_register_static(&bcm2835_powermgt_info); +} + +type_init(bcm2835_powermgt_register_types) diff --git a/hw/misc/meson.build b/hw/misc/meson.build index 66e1648533..f89b5c1643 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -82,6 +82,7 @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files( 'bcm2835_rng.c', 'bcm2835_thermal.c', 'bcm2835_cprman.c', + 'bcm2835_powermgt.c', )) softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: 
files('slavio_misc.c')) softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c', 'zynq-xadc.c')) diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h index 479e2346e8..d864879421 100644 --- a/include/hw/arm/bcm2835_peripherals.h +++ b/include/hw/arm/bcm2835_peripherals.h @@ -24,6 +24,7 @@ #include "hw/misc/bcm2835_mphi.h" #include "hw/misc/bcm2835_thermal.h" #include "hw/misc/bcm2835_cprman.h" +#include "hw/misc/bcm2835_powermgt.h" #include "hw/sd/sdhci.h" #include "hw/sd/bcm2835_sdhost.h" #include "hw/gpio/bcm2835_gpio.h" @@ -48,7 +49,7 @@ struct BCM2835PeripheralState { BCM2835MphiState mphi; UnimplementedDeviceState txp; UnimplementedDeviceState armtmr; - UnimplementedDeviceState powermgt; + BCM2835PowerMgtState powermgt; BCM2835CprmanState cprman; PL011State uart0; BCM2835AuxState aux; diff --git a/include/hw/misc/bcm2835_powermgt.h b/include/hw/misc/bcm2835_powermgt.h new file mode 100644 index 0000000000..303b9a6f68 --- /dev/null +++ b/include/hw/misc/bcm2835_powermgt.h @@ -0,0 +1,29 @@ +/* + * BCM2835 Power Management emulation + * + * Copyright (C) 2017 Marcin Chojnacki + * Copyright (C) 2021 Nolan Leake + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef BCM2835_POWERMGT_H +#define BCM2835_POWERMGT_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_BCM2835_POWERMGT "bcm2835-powermgt" +OBJECT_DECLARE_SIMPLE_TYPE(BCM2835PowerMgtState, BCM2835_POWERMGT) + +struct BCM2835PowerMgtState { + SysBusDevice busdev; + MemoryRegion iomem; + + uint32_t rstc; + uint32_t rsts; + uint32_t wdog; +}; + +#endif -- cgit v1.2.3 From 95079d5c79a315426fef19b0245db06b71e6c863 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Mon, 31 May 2021 13:38:37 +0200 Subject: tests: Boot and halt a Linux guest on the Raspberry Pi 2 machine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a test that boots and quickly shuts down a raspi2 machine, to test the power management model: (1/1) tests/acceptance/boot_linux_console.py:BootLinuxConsole.test_arm_raspi2_initrd: console: [ 0.000000] Booting Linux on physical CPU 0xf00 console: [ 0.000000] Linux version 4.14.98-v7+ (dom@dom-XPS-13-9370) (gcc version 4.9.3 (crosstool-NG crosstool-ng-1.22.0-88-g8460611)) #1200 SMP Tue Feb 12 20:27:48 GMT 2019 console: [ 0.000000] CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c5387d console: [ 0.000000] CPU: div instructions available: patching division code console: [ 0.000000] CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache console: [ 0.000000] OF: fdt: Machine model: Raspberry Pi 2 Model B ... console: Boot successful. console: cat /proc/cpuinfo console: / # cat /proc/cpuinfo ...
console: processor : 3 console: model name : ARMv7 Processor rev 5 (v7l) console: BogoMIPS : 125.00 console: Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm console: CPU implementer : 0x41 console: CPU architecture: 7 console: CPU variant : 0x0 console: CPU part : 0xc07 console: CPU revision : 5 console: Hardware : BCM2835 console: Revision : 0000 console: Serial : 0000000000000000 console: cat /proc/iomem console: / # cat /proc/iomem console: 00000000-3bffffff : System RAM console: 00008000-00afffff : Kernel code console: 00c00000-00d468ef : Kernel data console: 3f006000-3f006fff : dwc_otg console: 3f007000-3f007eff : /soc/dma@7e007000 console: 3f00b880-3f00b8bf : /soc/mailbox@7e00b880 console: 3f100000-3f100027 : /soc/watchdog@7e100000 console: 3f101000-3f102fff : /soc/cprman@7e101000 console: 3f200000-3f2000b3 : /soc/gpio@7e200000 PASS (24.59 s) RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0 JOB TIME : 25.02 s Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Wainer dos Santos Moschetta Message-id: 20210531113837.1689775-1-f4bug@amsat.org Signed-off-by: Peter Maydell --- tests/acceptance/boot_linux_console.py | 43 ++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tests/acceptance/boot_linux_console.py b/tests/acceptance/boot_linux_console.py index cded547d1d..3ae11a7a8f 100644 --- a/tests/acceptance/boot_linux_console.py +++ b/tests/acceptance/boot_linux_console.py @@ -16,6 +16,7 @@ import shutil from avocado import skip from avocado import skipUnless from avocado_qemu import Test +from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern from avocado_qemu import wait_for_console_pattern @@ -477,6 +478,48 @@ class BootLinuxConsole(LinuxKernelTest): """ self.do_test_arm_raspi2(0) + def test_arm_raspi2_initrd(self): + """ + :avocado: tags=arch:arm + :avocado: tags=machine:raspi2 + """ + deb_url = ('http://archive.raspberrypi.org/debian/' + 'pool/main/r/raspberrypi-firmware/' + 'raspberrypi-kernel_1.20190215-1_armhf.deb') + deb_hash = 'cd284220b32128c5084037553db3c482426f3972' + deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) + kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img') + dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb') + + initrd_url = ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' + 'arm/rootfs-armv7a.cpio.gz') + initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' + initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) + initrd_path = os.path.join(self.workdir, 'rootfs.cpio') + archive.gzip_uncompress(initrd_path_gz, initrd_path) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=pl011,0x3f201000 console=ttyAMA0 ' + 'panic=-1 noreboot ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'BCM2835') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + '/soc/cprman@7e101000') + exec_command(self, 'halt') + # Wait for VM to shut down gracefully + self.vm.wait() + def test_arm_exynos4210_initrd(self): """ :avocado: tags=arch:arm -- cgit 
v1.2.3 From 103e7579ddbd539fbe38e150da78264d0496023a Mon Sep 17 00:00:00 2001 From: Joe Komlodi Date: Fri, 25 Jun 2021 16:02:54 -0700 Subject: target/arm: Check NaN mode before silencing NaN If the CPU is running in default NaN mode (FPCR.DN == 1) and we execute FRSQRTE, FRECPE, or FRECPX with a signaling NaN, parts_silence_nan_frac() will assert due to fpst->default_nan_mode being set. To avoid this, we check to see what NaN mode we're running in before we call floatxx_silence_nan(). Signed-off-by: Joe Komlodi Reviewed-by: Richard Henderson Message-id: 1624662174-175828-2-git-send-email-joe.komlodi@xilinx.com Reviewed-by: Peter Maydell Signed-off-by: Peter Maydell --- target/arm/helper-a64.c | 12 +++++++++--- target/arm/vfp_helper.c | 24 ++++++++++++++++++------ 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c index 9cc3b066e2..ac5c4452d5 100644 --- a/target/arm/helper-a64.c +++ b/target/arm/helper-a64.c @@ -365,7 +365,9 @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp) float16 nan = a; if (float16_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float16_silence_nan(a, fpst); + if (!fpst->default_nan_mode) { + nan = float16_silence_nan(a, fpst); + } } if (fpst->default_nan_mode) { nan = float16_default_nan(fpst); @@ -396,7 +398,9 @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp) float32 nan = a; if (float32_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float32_silence_nan(a, fpst); + if (!fpst->default_nan_mode) { + nan = float32_silence_nan(a, fpst); + } } if (fpst->default_nan_mode) { nan = float32_default_nan(fpst); @@ -427,7 +431,9 @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp) float64 nan = a; if (float64_is_signaling_nan(a, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float64_silence_nan(a, fpst); + if (!fpst->default_nan_mode) { + nan = float64_silence_nan(a, fpst); + } } if (fpst->default_nan_mode) { nan = float64_default_nan(fpst); diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 8a71660059..24e3d820a5 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -671,7 +671,9 @@ uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) float16 nan = f16; if (float16_is_signaling_nan(f16, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float16_silence_nan(f16, fpst); + if (!fpst->default_nan_mode) { + nan = float16_silence_nan(f16, fpst); + } } if (fpst->default_nan_mode) { nan = float16_default_nan(fpst); @@ -719,7 +721,9 @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp) float32 nan = f32; if (float32_is_signaling_nan(f32, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float32_silence_nan(f32, fpst); + if (!fpst->default_nan_mode) { + nan = float32_silence_nan(f32, fpst); + } } if (fpst->default_nan_mode) { nan = float32_default_nan(fpst); @@ -767,7 +771,9 @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp) float64 nan = f64; if (float64_is_signaling_nan(f64, fpst)) { float_raise(float_flag_invalid, fpst); - nan = float64_silence_nan(f64, fpst); + if (!fpst->default_nan_mode) { + nan = float64_silence_nan(f64, fpst); + } } if (fpst->default_nan_mode) { nan = float64_default_nan(fpst); @@ -866,7 +872,9 @@ uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) float16 nan = f16; if (float16_is_signaling_nan(f16, s)) { float_raise(float_flag_invalid, s); - nan = float16_silence_nan(f16, s); + if (!s->default_nan_mode) { + nan = float16_silence_nan(f16, fpstp); + } } if 
(s->default_nan_mode) { nan = float16_default_nan(s); @@ -910,7 +918,9 @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) float32 nan = f32; if (float32_is_signaling_nan(f32, s)) { float_raise(float_flag_invalid, s); - nan = float32_silence_nan(f32, s); + if (!s->default_nan_mode) { + nan = float32_silence_nan(f32, fpstp); + } } if (s->default_nan_mode) { nan = float32_default_nan(s); @@ -953,7 +963,9 @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) float64 nan = f64; if (float64_is_signaling_nan(f64, s)) { float_raise(float_flag_invalid, s); - nan = float64_silence_nan(f64, s); + if (!s->default_nan_mode) { + nan = float64_silence_nan(f64, fpstp); + } } if (s->default_nan_mode) { nan = float64_default_nan(s); -- cgit v1.2.3 From e3bcf57c1a3c498fe7bd1f18744614a802d8859a Mon Sep 17 00:00:00 2001 From: Maxim Uvarov Date: Fri, 25 Jun 2021 14:18:42 +0300 Subject: hw/gpio/gpio_pwr: use shutdown function for reboot qemu has 2 type of functions: shutdown and reboot. Shutdown function has to be used for machine shutdown. Otherwise we cause a reset with a bogus "cause" value, when we intended a shutdown. Signed-off-by: Maxim Uvarov Reviewed-by: Peter Maydell Message-id: 20210625111842.3790-3-maxim.uvarov@linaro.org [PMM: tweaked commit message] Signed-off-by: Peter Maydell --- hw/gpio/gpio_pwr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c index 7714fa0dc4..dbaf1c70c8 100644 --- a/hw/gpio/gpio_pwr.c +++ b/hw/gpio/gpio_pwr.c @@ -43,7 +43,7 @@ static void gpio_pwr_reset(void *opaque, int n, int level) static void gpio_pwr_shutdown(void *opaque, int n, int level) { if (level) { - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } } -- cgit v1.2.3 From d59ccc30f64249d5727bc084e0f3cf4b2483117b Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:18 +0100 Subject: target/arm: Fix MVE widening/narrowing VLDR/VSTR offset calculation In do_ldst(), the calculation of the offset needs to be based on the size of the memory access, not the size of the elements in the vector. This meant we were getting it wrong for the widening and narrowing variants of the various VLDR and VSTR insns. 
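As a concrete example, take a hypothetical VLDRB.U16 [Rn, #4], which loads bytes from memory and widens them to 16-bit vector elements: the immediate must be scaled by the size of the memory access, not by the element size:

    /* before: offset = a->imm << a->size;  size = 1 (16-bit elements) -> 8 */
    /* after:  offset = a->imm << msize;    msize = MO_8 (byte access)  -> 4 */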
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-2-peter.maydell@linaro.org --- target/arm/translate-mve.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index 67462bdf27..e9a5442a72 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -120,7 +120,8 @@ static bool mve_skip_first_beat(DisasContext *s) } } -static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn) +static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn, + unsigned msize) { TCGv_i32 addr; uint32_t offset; @@ -141,7 +142,7 @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn) return true; } - offset = a->imm << a->size; + offset = a->imm << msize; if (!a->a) { offset = -offset; } @@ -178,22 +179,22 @@ static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a) { gen_helper_mve_vstrw, gen_helper_mve_vldrw }, { NULL, NULL } }; - return do_ldst(s, a, ldstfns[a->size][a->l]); + return do_ldst(s, a, ldstfns[a->size][a->l], a->size); } -#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST) \ +#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE) \ static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \ { \ static MVEGenLdStFn * const ldstfns[2][2] = { \ { gen_helper_mve_##ST, gen_helper_mve_##SLD }, \ { NULL, gen_helper_mve_##ULD }, \ }; \ - return do_ldst(s, a, ldstfns[a->u][a->l]); \ + return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE); \ } -DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h) -DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w) -DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w) +DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8) +DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8) +DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16) static bool trans_VDUP(DisasContext *s, arg_VDUP *a) { -- cgit v1.2.3 From 303db86fc73c68d8774203d4796b9995cc122886 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:19 +0100 Subject: target/arm: Fix bugs in MVE VRMLALDAVH, VRMLSLDAVH The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH insns had some bugs: * the 32x32 multiply of elements was being done as 32x32->32, not 32x32->64 * we were incorrectly maintaining the accumulator in its full 72-bit form across all 4 beats of the insn; in the pseudocode it is squashed back into the 64 bits of the RdaHi:RdaLo registers after each beat In particular, fixing the second of these allows us to recast the implementation to avoid 128-bit arithmetic entirely. Since the element size here is always 4, we can also drop the parameterization of ESIZE to make the code a little more readable. 
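The per-beat arithmetic after this change can be sketched as a standalone function (a simplification of the DO_LDAVH macro below; the function name is illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    /* One beat of VRMLALDAVH: 32x32->64 multiply, round at bit 7, and
     * accumulate into the 64-bit RdaHi:RdaLo value. This matches the
     * pseudocode's 72-bit accumulate followed by the squash to 64 bits.
     * 'sub' is true for the odd beats of the subtracting (VRMLSLDAVH)
     * forms, which negate the product. */
    static uint64_t ldavh_beat(uint64_t acc, int32_t n, int32_t m, bool sub)
    {
        int64_t mul = (int64_t)n * m;  /* first bug fixed: 32x32->64 */
        if (sub) {
            mul = -mul;
        }
        mul = (mul >> 8) + ((mul >> 7) & 1);  /* round-to-nearest at bit 7 */
        return acc + (uint64_t)mul;
    }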
Suggested-by: Richard Henderson Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-3-peter.maydell@linaro.org --- target/arm/mve_helper.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 05552ce7ee..85a552fe07 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu/int128.h" #include "cpu.h" #include "internals.h" #include "vec_internal.h" @@ -1100,40 +1099,45 @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=) DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=) /* - * Rounding multiply add long dual accumulate high: we must keep - * a 72-bit internal accumulator value and return the top 64 bits. + * Rounding multiply add long dual accumulate high. In the pseudocode + * this is implemented with a 72-bit internal accumulator value of which + * the top 64 bits are returned. We optimize this to avoid having to + * use 128-bit arithmetic -- we can do this because the 74-bit accumulator + * is squashed back into 64-bits after each beat. */ -#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \ +#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ void *vm, uint64_t a) \ { \ uint16_t mask = mve_element_mask(env); \ unsigned e; \ TYPE *n = vn, *m = vm; \ - Int128 acc = int128_lshift(TO128(a), 8); \ - for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ if (mask & 1) { \ + LTYPE mul; \ if (e & 1) { \ - acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \ - m[H##ESIZE(e)])); \ + mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \ + if (SUB) { \ + mul = -mul; \ + } \ } else { \ - acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \ - m[H##ESIZE(e)])); \ + mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \ } \ - acc = int128_add(acc, int128_make64(1 << 7)); \ + mul = (mul >> 8) + ((mul >> 7) & 1); \ + a += mul; \ } \ } \ mve_advance_vpt(env); \ - return int128_getlo(int128_rshift(acc, 8)); \ + return a; \ } -DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64) -DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64) +DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false) +DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false) -DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64) +DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false) -DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64) -DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64) +DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true) +DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true) /* Vector add across vector */ #define DO_VADDV(OP, ESIZE, TYPE) \ -- cgit v1.2.3 From dfd66bc0f37dde37b8b2d7bad3a7075332e75fb4 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:20 +0100 Subject: target/arm: Make asimd_imm_const() public The function asimd_imm_const() in translate-neon.c is an implementation of the pseudocode AdvSIMDExpandImm(), which we will also want for MVE. Move the implementation to translate.c, with a prototype in translate.h. 
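For a feel of what AdvSIMDExpandImm produces, here are a few expansions of asimd_imm_const(0xab, cmode, op), worked through by hand from the pseudocode (worth double-checking against the function itself):

    /* cmode=0,  op=0 -> 0x000000ab000000ab   (Zeros(24):imm8, per 32 bits) */
    /* cmode=6,  op=0 -> 0xab000000ab000000   (imm8:Zeros(24), per 32 bits) */
    /* cmode=12, op=0 -> 0x0000abff0000abff   (imm8:Ones(8),   per 32 bits) */
    /* cmode=14, op=1 -> 0xff00ff00ff00ffff   (bit n of imm8 -> byte n)     */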
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-4-peter.maydell@linaro.org --- target/arm/translate-neon.c | 63 --------------------------------------------- target/arm/translate.c | 57 ++++++++++++++++++++++++++++++++++++++++ target/arm/translate.h | 16 ++++++++++++ 3 files changed, 73 insertions(+), 63 deletions(-) diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c index 633fef3bf7..f915f70970 100644 --- a/target/arm/translate-neon.c +++ b/target/arm/translate-neon.c @@ -1781,69 +1781,6 @@ DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh) DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs) DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu) -static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op) -{ - /* - * Expand the encoded constant. - * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. - * We choose to not special-case this and will behave as if a - * valid constant encoding of 0 had been given. - * cmode = 15 op = 1 must UNDEF; we assume decode has handled that. - */ - switch (cmode) { - case 0: case 1: - /* no-op */ - break; - case 2: case 3: - imm <<= 8; - break; - case 4: case 5: - imm <<= 16; - break; - case 6: case 7: - imm <<= 24; - break; - case 8: case 9: - imm |= imm << 16; - break; - case 10: case 11: - imm = (imm << 8) | (imm << 24); - break; - case 12: - imm = (imm << 8) | 0xff; - break; - case 13: - imm = (imm << 16) | 0xffff; - break; - case 14: - if (op) { - /* - * This is the only case where the top and bottom 32 bits - * of the encoded constant differ. - */ - uint64_t imm64 = 0; - int n; - - for (n = 0; n < 8; n++) { - if (imm & (1 << n)) { - imm64 |= (0xffULL << (n * 8)); - } - } - return imm64; - } - imm |= (imm << 8) | (imm << 16) | (imm << 24); - break; - case 15: - imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) - | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); - break; - } - if (op) { - imm = ~imm; - } - return dup_const(MO_32, imm); -} - static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a, GVecGen2iFn *fn) { diff --git a/target/arm/translate.c b/target/arm/translate.c index a0c6cfa902..95ceb24ec3 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -90,6 +90,63 @@ void arm_translate_init(void) a64_translate_init(); } +uint64_t asimd_imm_const(uint32_t imm, int cmode, int op) +{ + /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */ + switch (cmode) { + case 0: case 1: + /* no-op */ + break; + case 2: case 3: + imm <<= 8; + break; + case 4: case 5: + imm <<= 16; + break; + case 6: case 7: + imm <<= 24; + break; + case 8: case 9: + imm |= imm << 16; + break; + case 10: case 11: + imm = (imm << 8) | (imm << 24); + break; + case 12: + imm = (imm << 8) | 0xff; + break; + case 13: + imm = (imm << 16) | 0xffff; + break; + case 14: + if (op) { + /* + * This is the only case where the top and bottom 32 bits + * of the encoded constant differ. + */ + uint64_t imm64 = 0; + int n; + + for (n = 0; n < 8; n++) { + if (imm & (1 << n)) { + imm64 |= (0xffULL << (n * 8)); + } + } + return imm64; + } + imm |= (imm << 8) | (imm << 16) | (imm << 24); + break; + case 15: + imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) + | ((imm & 0x40) ? 
(0x1f << 25) : (1 << 30)); + break; + } + if (op) { + imm = ~imm; + } + return dup_const(MO_32, imm); +} + /* Generate a label used for skipping this instruction */ void arm_gen_condlabel(DisasContext *s) { diff --git a/target/arm/translate.h b/target/arm/translate.h index 99c917c571..6c8d5f6ede 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -532,4 +532,20 @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc) return opc | s->be_data; } +/** + * asimd_imm_const: Expand an encoded SIMD constant value + * + * Expand a SIMD constant value. This is essentially the pseudocode + * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for + * VMVN and VBIC (when cmode < 14 && op == 1). + * + * The combination cmode == 15 op == 1 is a reserved encoding for AArch32; + * callers must catch this. + * + * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but + * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A; + * we produce an immediate constant value of 0 in these cases. + */ +uint64_t asimd_imm_const(uint32_t imm, int cmode, int op); + #endif /* TARGET_ARM_TRANSLATE_H */ -- cgit v1.2.3 From 2c0286dba46526ee6c23b1f28af62a857dace704 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:21 +0100 Subject: target/arm: Use asimd_imm_const for A64 decode The A64 AdvSIMD modified-immediate grouping uses almost the same constant encoding that A32 Neon does; reuse asimd_imm_const() (to which we add the AArch64-specific case for cmode 15 op 1) instead of reimplementing it all. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-5-peter.maydell@linaro.org --- target/arm/translate-a64.c | 86 ++++------------------------------------------ target/arm/translate.c | 17 +++++++-- target/arm/translate.h | 3 +- 3 files changed, 24 insertions(+), 82 deletions(-) diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 1a40e49db7..66781f71cb 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -8190,8 +8190,6 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) { int rd = extract32(insn, 0, 5); int cmode = extract32(insn, 12, 4); - int cmode_3_1 = extract32(cmode, 1, 3); - int cmode_0 = extract32(cmode, 0, 1); int o2 = extract32(insn, 11, 1); uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5); bool is_neg = extract32(insn, 29, 1); @@ -8210,83 +8208,13 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) return; } - /* See AdvSIMDExpandImm() in ARM ARM */ - switch (cmode_3_1) { - case 0: /* Replicate(Zeros(24):imm8, 2) */ - case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */ - case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */ - case 3: /* Replicate(imm8:Zeros(24), 2) */ - { - int shift = cmode_3_1 * 8; - imm = bitfield_replicate(abcdefgh << shift, 32); - break; - } - case 4: /* Replicate(Zeros(8):imm8, 4) */ - case 5: /* Replicate(imm8:Zeros(8), 4) */ - { - int shift = (cmode_3_1 & 0x1) * 8; - imm = bitfield_replicate(abcdefgh << shift, 16); - break; - } - case 6: - if (cmode_0) { - /* Replicate(Zeros(8):imm8:Ones(16), 2) */ - imm = (abcdefgh << 16) | 0xffff; - } else { - /* Replicate(Zeros(16):imm8:Ones(8), 2) */ - imm = (abcdefgh << 8) | 0xff; - } - imm = bitfield_replicate(imm, 32); - break; - case 7: - if (!cmode_0 && !is_neg) { - imm = bitfield_replicate(abcdefgh, 8); - } else if (!cmode_0 && is_neg) { - int i; - imm = 0; - for (i = 0; i < 8; i++) { - if ((abcdefgh) & (1 << i)) { - imm 
|= 0xffULL << (i * 8); - } - } - } else if (cmode_0) { - if (is_neg) { - imm = (abcdefgh & 0x3f) << 48; - if (abcdefgh & 0x80) { - imm |= 0x8000000000000000ULL; - } - if (abcdefgh & 0x40) { - imm |= 0x3fc0000000000000ULL; - } else { - imm |= 0x4000000000000000ULL; - } - } else { - if (o2) { - /* FMOV (vector, immediate) - half-precision */ - imm = vfp_expand_imm(MO_16, abcdefgh); - /* now duplicate across the lanes */ - imm = bitfield_replicate(imm, 16); - } else { - imm = (abcdefgh & 0x3f) << 19; - if (abcdefgh & 0x80) { - imm |= 0x80000000; - } - if (abcdefgh & 0x40) { - imm |= 0x3e000000; - } else { - imm |= 0x40000000; - } - imm |= (imm << 32); - } - } - } - break; - default: - g_assert_not_reached(); - } - - if (cmode_3_1 != 7 && is_neg) { - imm = ~imm; + if (cmode == 15 && o2 && !is_neg) { + /* FMOV (vector, immediate) - half-precision */ + imm = vfp_expand_imm(MO_16, abcdefgh); + /* now duplicate across the lanes */ + imm = bitfield_replicate(imm, 16); + } else { + imm = asimd_imm_const(abcdefgh, cmode, is_neg); } if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) { diff --git a/target/arm/translate.c b/target/arm/translate.c index 95ceb24ec3..66b24ab56e 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -121,8 +121,8 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op) case 14: if (op) { /* - * This is the only case where the top and bottom 32 bits - * of the encoded constant differ. + * This and cmode == 15 op == 1 are the only cases where + * the top and bottom 32 bits of the encoded constant differ. */ uint64_t imm64 = 0; int n; @@ -137,6 +137,19 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op) imm |= (imm << 8) | (imm << 16) | (imm << 24); break; case 15: + if (op) { + /* Reserved encoding for AArch32; valid for AArch64 */ + uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48; + if (imm & 0x80) { + imm64 |= 0x8000000000000000ULL; + } + if (imm & 0x40) { + imm64 |= 0x3fc0000000000000ULL; + } else { + imm64 |= 0x4000000000000000ULL; + } + return imm64; + } imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); break; diff --git a/target/arm/translate.h b/target/arm/translate.h index 6c8d5f6ede..e2f056c32c 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -540,7 +540,8 @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc) * VMVN and VBIC (when cmode < 14 && op == 1). * * The combination cmode == 15 op == 1 is a reserved encoding for AArch32; - * callers must catch this. + * callers must catch this; we return the 64-bit constant value defined + * for AArch64. * * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A; -- cgit v1.2.3 From e4667a5b5e71d83e3e2af70e7dba4bfab8892829 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:22 +0100 Subject: target/arm: Use dup_const() instead of bitfield_replicate() Use dup_const() instead of bitfield_replicate() in disas_simd_mod_imm(). (We can't replace the other use of bitfield_replicate() in this file, in logic_imm_decode_wmask(), because that location needs to handle 2 and 4 bit elements, which dup_const() cannot.) 
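For context, dup_const() broadcasts an element of the given MO_* size across a 64-bit value; a few examples of its behaviour:

    /* dup_const(MO_8,  0xab)   == 0xababababababababULL */
    /* dup_const(MO_16, 0x12cd) == 0x12cd12cd12cd12cdULL */
    /* dup_const(MO_32, 0x89ab) == 0x000089ab000089abULL */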
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-6-peter.maydell@linaro.org --- target/arm/translate-a64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 66781f71cb..e81cc20d04 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -8212,7 +8212,7 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) /* FMOV (vector, immediate) - half-precision */ imm = vfp_expand_imm(MO_16, abcdefgh); /* now duplicate across the lanes */ - imm = bitfield_replicate(imm, 16); + imm = dup_const(MO_16, imm); } else { imm = asimd_imm_const(abcdefgh, cmode, is_neg); } -- cgit v1.2.3 From eab84139855dac258c8d89ad736f6649e3edc76a Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:23 +0100 Subject: target/arm: Implement MVE logical immediate insns Implement the MVE logical-immediate insns (VMOV, VMVN, VORR and VBIC). These have essentially the same encoding as their Neon equivalents, and we implement the decode in the same way. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-7-peter.maydell@linaro.org --- target/arm/helper-mve.h | 4 ++++ target/arm/mve.decode | 17 ++++++++++++++++ target/arm/mve_helper.c | 24 ++++++++++++++++++++++ target/arm/translate-mve.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 95 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 4bbb9b3ae2..5248dbe825 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -355,3 +355,7 @@ DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32) + +DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64) +DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64) +DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index d9ece7be5d..caeb016c12 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -26,10 +26,14 @@ # VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit %size_28 28:1 !function=plus_1 +# 1imm format immediate +%imm_28_16_0 28:1 16:3 0:4 + &vldr_vstr rn qd imm p a w size l u &1op qd qm size &2op qd qm qn size &2scalar qd qn rm size +&1imm qd imm cmode op @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0 # Note that both Rn and Qd are 3 bits only (no D bit) @@ -41,6 +45,7 @@ @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0 @2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \ size=%size_28 +@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0 # The _rev suffix indicates that Vn and Vm are reversed. This is # the case for shifts. In the Arm ARM these insns are documented @@ -258,3 +263,15 @@ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rd # Predicate operations %mask_22_13 22:1 13:3 VPST 1111 1110 0 . 11 000 1 ... 
0 1111 0100 1101 mask=%mask_22_13 + +# Logical immediate operations (1 reg and modified-immediate) + +# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but +# not in a way we can conveniently represent in decodetree without +# a lot of repetition: +# VORR: op=0, (cmode & 1) && cmode < 12 +# VBIC: op=1, (cmode & 1) && cmode < 12 +# VMOV: everything else +# So we have a single decode line and check the cmode/op in the +# trans function. +Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 85a552fe07..e6ced14467 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -323,6 +323,30 @@ DO_1OP(vnegw, 4, int32_t, DO_NEG) DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH) DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS) +/* + * 1 operand immediates: Vda is destination and possibly also one source. + * All these insns work at 64-bit widths. + */ +#define DO_1OP_IMM(OP, FN) \ + void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \ + { \ + uint64_t *da = vda; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ + mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_MOVI(N, I) (I) +#define DO_ANDI(N, I) ((N) & (I)) +#define DO_ORRI(N, I) ((N) | (I)) + +DO_1OP_IMM(vmovi, DO_MOVI) +DO_1OP_IMM(vandi, DO_ANDI) +DO_1OP_IMM(vorri, DO_ORRI) + #define DO_2OP(OP, ESIZE, TYPE, FN) \ void HELPER(glue(mve_, OP))(CPUARMState *env, \ void *vd, void *vn, void *vm) \ diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index e9a5442a72..f435a1cfd9 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -34,6 +34,7 @@ typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64); typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64); /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */ static inline long mve_qreg_offset(unsigned reg) @@ -787,3 +788,52 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) mve_update_eci(s); return true; } + +static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn) +{ + TCGv_ptr qd; + uint64_t imm; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qd) || + !fn) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + imm = asimd_imm_const(a->imm, a->cmode, a->op); + + qd = mve_qreg_ptr(a->qd); + fn(cpu_env, qd, tcg_constant_i64(imm)); + tcg_temp_free_ptr(qd); + mve_update_eci(s); + return true; +} + +static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) +{ + /* Handle decode of cmode/op here between VORR/VBIC/VMOV */ + MVEGenOneOpImmFn *fn; + + if ((a->cmode & 1) && a->cmode < 12) { + if (a->op) { + /* + * For op=1, the immediate will be inverted by asimd_imm_const(), + * so the VBIC becomes a logical AND operation. 
+ */ + fn = gen_helper_mve_vandi; + } else { + fn = gen_helper_mve_vorri; + } + } else { + /* There is one unallocated cmode/op combination in this space */ + if (a->cmode == 15 && a->op == 1) { + return false; + } + /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */ + fn = gen_helper_mve_vmovi; + } + return do_1imm(s, a, fn); +} -- cgit v1.2.3 From f9ed61741e5f26ee1bb933a87669697901d9327d Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:24 +0100 Subject: target/arm: Implement MVE vector shift left by immediate insns Implement the MVE shift-vector-left-by-immediate insns VSHL, VQSHL and VQSHLU. The size-and-immediate encoding here is the same as Neon, and we handle it the same way neon-dp.decode does. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-8-peter.maydell@linaro.org --- target/arm/helper-mve.h | 16 +++++++++++++ target/arm/mve.decode | 23 +++++++++++++++++++ target/arm/mve_helper.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++ target/arm/translate-mve.c | 51 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 147 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 5248dbe825..8cd7c6a0d8 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -359,3 +359,19 @@ DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64) + +DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index caeb016c12..183eb731d2 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -34,6 +34,7 @@ &2op qd qm qn size &2scalar qd qn rm size &1imm qd imm cmode op +&2shift qd qm shift size @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0 # Note that both Rn and Qd are 3 bits only (no D bit) @@ -59,6 +60,10 @@ @2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn @2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn +@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0 +@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1 +@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2 + # Vector loads and stores # Widening loads and narrowing stores: @@ -275,3 +280,21 @@ VPST 1111 1110 0 . 11 000 1 ... 
0 1111 0100 1101 mask=%mask_22_13 # So we have a single decode line and check the cmode/op in the # trans function. Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm + +# Shifts by immediate + +VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b +VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h +VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w + +VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b +VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h +VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w + +VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b +VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h +VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w + +VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b +VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h +VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index e6ced14467..285c8b56f7 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -733,6 +733,8 @@ DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W) WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp) #define DO_UQRSHL_OP(N, M, satp) \ WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp) +#define DO_SUQSHL_OP(N, M, satp) \ + WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp) DO_2OP_SAT_S(vqshls, DO_SQSHL_OP) DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP) @@ -1186,3 +1188,58 @@ DO_VADDV(vaddvsw, 4, uint32_t) DO_VADDV(vaddvub, 1, uint8_t) DO_VADDV(vaddvuh, 2, uint16_t) DO_VADDV(vaddvuw, 4, uint32_t) + +/* Shifts by immediate */ +#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ + void *vm, uint32_t shift) \ + { \ + TYPE *d = vd, *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + mergemask(&d[H##ESIZE(e)], \ + FN(m[H##ESIZE(e)], shift), mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ + void *vm, uint32_t shift) \ + { \ + TYPE *d = vd, *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + bool qc = false; \ + for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + bool sat = false; \ + mergemask(&d[H##ESIZE(e)], \ + FN(m[H##ESIZE(e)], shift, &sat), mask); \ + qc |= sat & mask & 1; \ + } \ + if (qc) { \ + env->vfp.qc[0] = qc; \ + } \ + mve_advance_vpt(env); \ + } + +/* provide unsigned 2-op shift helpers for all sizes */ +#define DO_2SHIFT_U(OP, FN) \ + DO_2SHIFT(OP##b, 1, uint8_t, FN) \ + DO_2SHIFT(OP##h, 2, uint16_t, FN) \ + DO_2SHIFT(OP##w, 4, uint32_t, FN) + +#define DO_2SHIFT_SAT_U(OP, FN) \ + DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \ + DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \ + DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN) +#define DO_2SHIFT_SAT_S(OP, FN) \ + DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \ + DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \ + DO_2SHIFT_SAT(OP##w, 4, int32_t, FN) + +DO_2SHIFT_U(vshli_u, DO_VSHLU) +DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP) +DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP) +DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index f435a1cfd9..fc8a2da6e1 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -32,6 +32,7 @@ typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void MVEGenOneOpFn(TCGv_ptr, 
TCGv_ptr, TCGv_ptr); typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); +typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64); typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64); @@ -837,3 +838,53 @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) } return do_1imm(s, a, fn); } + +static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, + bool negateshift) +{ + TCGv_ptr qd, qm; + int shift = a->shift; + + if (!dc_isar_feature(aa32_mve, s) || + !mve_check_qreg_bank(s, a->qd | a->qm) || + !fn) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + /* + * When we handle a right shift insn using a left-shift helper + * which permits a negative shift count to indicate a right-shift, + * we must negate the shift count. + */ + if (negateshift) { + shift = -shift; + } + + qd = mve_qreg_ptr(a->qd); + qm = mve_qreg_ptr(a->qm); + fn(cpu_env, qd, qm, tcg_constant_i32(shift)); + tcg_temp_free_ptr(qd); + tcg_temp_free_ptr(qm); + mve_update_eci(s); + return true; +} + +#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \ + static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ + { \ + static MVEGenTwoOpShiftFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_2shift(s, a, fns[a->size], NEGATESHIFT); \ + } + +DO_2SHIFT(VSHLI, vshli_u, false) +DO_2SHIFT(VQSHLI_S, vqshli_s, false) +DO_2SHIFT(VQSHLI_U, vqshli_u, false) +DO_2SHIFT(VQSHLUI, vqshlui_s, false) -- cgit v1.2.3 From 3394116f47d12bb577ee44493d3d61a30ec9dd68 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:25 +0100 Subject: target/arm: Implement MVE vector shift right by immediate insns Implement the MVE vector shift right by immediate insns VSHRI and VRSHRI. As with Neon, we implement these by using helper functions which perform left shifts but allow negative shift counts to indicate right shifts. 
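A simplified sketch of that convention (the real do_sqrshl_bhs()-style helpers also handle saturation and out-of-range shift counts, which are omitted here; the function name is illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    /* Shift n left by 'shift'; a negative count encodes a right shift.
     * For the rounding forms (VRSHRI) the rounding constant is added
     * before shifting, giving round-to-nearest. Overflow handling that
     * the real helpers perform is omitted from this sketch. */
    static int32_t do_shl_signed(int32_t n, int shift, bool round)
    {
        if (shift >= 0) {
            return n << shift;   /* plain left shift (VSHLI) */
        }
        shift = -shift;          /* VSHRI/VRSHRI pass a negated count */
        if (round) {
            n += 1 << (shift - 1);
        }
        return n >> shift;
    }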
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-9-peter.maydell@linaro.org --- target/arm/helper-mve.h | 12 ++++++++++++ target/arm/mve.decode | 28 ++++++++++++++++++++++++++++ target/arm/mve_helper.c | 7 +++++++ target/arm/translate-mve.c | 5 +++++ target/arm/translate-neon.c | 18 ------------------ target/arm/translate.h | 20 ++++++++++++++++++++ 6 files changed, 72 insertions(+), 18 deletions(-) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 8cd7c6a0d8..288a8faf4e 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -360,6 +360,10 @@ DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64) +DEF_HELPER_FLAGS_4(mve_vshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) @@ -375,3 +379,11 @@ DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 183eb731d2..8be04589a6 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -64,6 +64,18 @@ @2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1 @2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2 +# Right shifts are encoded as N - shift, where N is the element size in bits. +%rshift_i5 16:5 !function=rsub_32 +%rshift_i4 16:4 !function=rsub_16 +%rshift_i3 16:3 !function=rsub_8 + +@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \ + size=0 shift=%rshift_i3 +@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \ + size=1 shift=%rshift_i4 +@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \ + size=2 shift=%rshift_i5 + # Vector loads and stores # Widening loads and narrowing stores: @@ -298,3 +310,19 @@ VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w + +VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b +VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h +VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w + +VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b +VSHRI_U 111 1 1111 1 . ... ... ... 
0 0000 0 1 . 1 ... 0 @2_shr_h +VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w + +VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b +VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h +VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w + +VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b +VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h +VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 285c8b56f7..ac720c9ee0 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1229,6 +1229,10 @@ DO_VADDV(vaddvuw, 4, uint32_t) DO_2SHIFT(OP##b, 1, uint8_t, FN) \ DO_2SHIFT(OP##h, 2, uint16_t, FN) \ DO_2SHIFT(OP##w, 4, uint32_t, FN) +#define DO_2SHIFT_S(OP, FN) \ + DO_2SHIFT(OP##b, 1, int8_t, FN) \ + DO_2SHIFT(OP##h, 2, int16_t, FN) \ + DO_2SHIFT(OP##w, 4, int32_t, FN) #define DO_2SHIFT_SAT_U(OP, FN) \ DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \ @@ -1240,6 +1244,9 @@ DO_VADDV(vaddvuw, 4, uint32_t) DO_2SHIFT_SAT(OP##w, 4, int32_t, FN) DO_2SHIFT_U(vshli_u, DO_VSHLU) +DO_2SHIFT_S(vshli_s, DO_VSHLS) DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP) DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP) DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP) +DO_2SHIFT_U(vrshli_u, DO_VRSHLU) +DO_2SHIFT_S(vrshli_s, DO_VRSHLS) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index fc8a2da6e1..4030ee07f0 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -888,3 +888,8 @@ DO_2SHIFT(VSHLI, vshli_u, false) DO_2SHIFT(VQSHLI_S, vqshli_s, false) DO_2SHIFT(VQSHLI_U, vqshli_u, false) DO_2SHIFT(VQSHLUI, vqshlui_s, false) +/* These right shifts use a left-shift helper with negated shift count */ +DO_2SHIFT(VSHRI_S, vshli_s, true) +DO_2SHIFT(VSHRI_U, vshli_u, true) +DO_2SHIFT(VRSHRI_S, vrshli_s, true) +DO_2SHIFT(VRSHRI_U, vrshli_u, true) diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c index f915f70970..a45616cb63 100644 --- a/target/arm/translate-neon.c +++ b/target/arm/translate-neon.c @@ -33,24 +33,6 @@ static inline int plus1(DisasContext *s, int x) return x + 1; } -static inline int rsub_64(DisasContext *s, int x) -{ - return 64 - x; -} - -static inline int rsub_32(DisasContext *s, int x) -{ - return 32 - x; -} -static inline int rsub_16(DisasContext *s, int x) -{ - return 16 - x; -} -static inline int rsub_8(DisasContext *s, int x) -{ - return 8 - x; -} - static inline int neon_3same_fp_size(DisasContext *s, int x) { /* Convert 0==fp32, 1==fp16 into a MO_* value */ diff --git a/target/arm/translate.h b/target/arm/translate.h index e2f056c32c..4b5db937ef 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -161,6 +161,26 @@ static inline int times_2_plus_1(DisasContext *s, int x) return x * 2 + 1; } +static inline int rsub_64(DisasContext *s, int x) +{ + return 64 - x; +} + +static inline int rsub_32(DisasContext *s, int x) +{ + return 32 - x; +} + +static inline int rsub_16(DisasContext *s, int x) +{ + return 16 - x; +} + +static inline int rsub_8(DisasContext *s, int x) +{ + return 8 - x; +} + static inline int arm_dc_feature(DisasContext *dc, int feature) { return (dc->features & (1ULL << feature)) != 0; -- cgit v1.2.3 From c2262707034c2b596db41fbc682150948e939772 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:26 +0100 Subject: target/arm: Implement MVE VSHLL Implement the MVE VSHLL (vector shift left long) insn.
This has two encodings: the T1 encoding is the usual shift-by-immediate format, and the T2 encoding is a special case where the shift count is always equal to the element size. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-10-peter.maydell@linaro.org --- target/arm/helper-mve.h | 9 ++++++++ target/arm/mve.decode | 53 ++++++++++++++++++++++++++++++++++++++++++---- target/arm/mve_helper.c | 32 ++++++++++++++++++++++++++++ target/arm/translate-mve.c | 15 +++++++++++++ 4 files changed, 105 insertions(+), 4 deletions(-) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 288a8faf4e..8af0e7fd8c 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -387,3 +387,12 @@ DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshllbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 8be04589a6..6e6032b25a 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -64,6 +64,14 @@ @2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1 @2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2 +@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0 +@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1 +# VSHLL encoding T2 where shift == esize +@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \ + qd=%qd qm=%qm size=0 shift=8 +@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \ + qd=%qd qm=%qm size=1 shift=16 + # Right shifts are encoded as N - shift, where N is the element size in bits. %rshift_i5 16:5 !function=rsub_32 %rshift_i4 16:4 !function=rsub_16 @@ -122,11 +130,35 @@ VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op -VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op -VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op +# The VSHLL T2 encoding is not a @2op pattern, but is here because it +# overlaps what would be size=0b11 VMULH/VRMULH +{ + VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b + VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h + + VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op +} + +{ + VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b + VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h -VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op -VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op + VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 
1 @2op +} + +{ + VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b + VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h + + VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op +} + +{ + VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b + VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h + + VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op +} VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op @@ -326,3 +358,16 @@ VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w + +# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file +VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b +VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h + +VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b +VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h + +VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b +VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h + +VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b +VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index ac720c9ee0..8798e77cba 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1250,3 +1250,35 @@ DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP) DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP) DO_2SHIFT_U(vrshli_u, DO_VRSHLU) DO_2SHIFT_S(vrshli_s, DO_VRSHLS) + +/* + * Long shifts taking half-sized inputs from top or bottom of the input + * vector and producing a double-width result. ESIZE, TYPE are for + * the input, and LESIZE, LTYPE for the output. + * Unlike the normal shift helpers, we do not handle negative shift counts, + * because the long shift is strictly left-only. 
+ */ +#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ + void *vm, uint32_t shift) \ + { \ + LTYPE *d = vd; \ + TYPE *m = vm; \ + uint16_t mask = mve_element_mask(env); \ + unsigned le; \ + assert(shift <= 16); \ + for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ + LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \ + mergemask(&d[H##LESIZE(le)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VSHLL_ALL(OP, TOP) \ + DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \ + DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \ + DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \ + DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \ + +DO_VSHLL_ALL(vshllb, false) +DO_VSHLL_ALL(vshllt, true) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index 4030ee07f0..044462c375 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -893,3 +893,18 @@ DO_2SHIFT(VSHRI_S, vshli_s, true) DO_2SHIFT(VSHRI_U, vshli_u, true) DO_2SHIFT(VRSHRI_S, vrshli_s, true) DO_2SHIFT(VRSHRI_U, vrshli_u, true) + +#define DO_VSHLL(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ + { \ + static MVEGenTwoOpShiftFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + }; \ + return do_2shift(s, a, fns[a->size], false); \ + } + +DO_VSHLL(VSHLL_BS, vshllbs) +DO_VSHLL(VSHLL_BU, vshllbu) +DO_VSHLL(VSHLL_TS, vshllts) +DO_VSHLL(VSHLL_TU, vshlltu) -- cgit v1.2.3 From a78b25fa71f1d2d9bcfdf2026743784e12efeeac Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:27 +0100 Subject: target/arm: Implement MVE VSRI, VSLI Implement the MVE VSRI and VSLI insns, which perform a shift-and-insert operation. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-11-peter.maydell@linaro.org --- target/arm/helper-mve.h | 8 ++++++++ target/arm/mve.decode | 9 +++++++++ target/arm/mve_helper.c | 42 ++++++++++++++++++++++++++++++++++++++++++ target/arm/translate-mve.c | 3 +++ 4 files changed, 62 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 8af0e7fd8c..e452d2ef7a 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -396,3 +396,11 @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 6e6032b25a..c3b5366617 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -371,3 +371,12 @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h + +# Shift-and-insert +VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b +VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 
0 @2_shr_h +VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w + +VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b +VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h +VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 8798e77cba..24336d1d28 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1251,6 +1251,48 @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP) DO_2SHIFT_U(vrshli_u, DO_VRSHLU) DO_2SHIFT_S(vrshli_s, DO_VRSHLS) +/* Shift-and-insert; we always work with 64 bits at a time */ +#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ + void *vm, uint32_t shift) \ + { \ + uint64_t *d = vd, *m = vm; \ + uint16_t mask; \ + uint64_t shiftmask; \ + unsigned e; \ + if (shift == 0 || shift == ESIZE * 8) { \ + /* \ + * Only VSLI can shift by 0; only VSRI can shift by
<dd>. \ + * The generic logic would give the right answer for 0 but \ + * fails for <dd>
. \ + */ \ + goto done; \ + } \ + assert(shift < ESIZE * 8); \ + mask = mve_element_mask(env); \ + /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \ + shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \ + for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ + uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \ + (d[H8(e)] & ~shiftmask); \ + mergemask(&d[H8(e)], r, mask); \ + } \ +done: \ + mve_advance_vpt(env); \ + } + +#define DO_SHL(N, SHIFT) ((N) << (SHIFT)) +#define DO_SHR(N, SHIFT) ((N) >> (SHIFT)) +#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT)) +#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT)) + +DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK) +DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK) +DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK) +DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK) +DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK) +DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK) + /* * Long shifts taking half-sized inputs from top or bottom of the input * vector and producing a double-width result. ESIZE, TYPE are for diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index 044462c375..b031f84966 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -894,6 +894,9 @@ DO_2SHIFT(VSHRI_U, vshli_u, true) DO_2SHIFT(VRSHRI_S, vrshli_s, true) DO_2SHIFT(VRSHRI_U, vrshli_u, true) +DO_2SHIFT(VSRI, vsri, false) +DO_2SHIFT(VSLI, vsli, false) + #define DO_VSHLL(INSN, FN) \ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ { \ -- cgit v1.2.3 From 162e2655000689e44ac4c8e9e8dc413821e0adda Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:28 +0100 Subject: target/arm: Implement MVE VSHRN, VRSHRN Implement the MVE shift-right-and-narrow insn VSHRN and VRSHRN. do_urshr() is borrowed from sve_helper.c. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-12-peter.maydell@linaro.org --- target/arm/helper-mve.h | 10 ++++++++++ target/arm/mve.decode | 11 +++++++++++ target/arm/mve_helper.c | 40 ++++++++++++++++++++++++++++++++++++++++ target/arm/translate-mve.c | 15 +++++++++++++++ 4 files changed, 76 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index e452d2ef7a..323ac07fa3 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -404,3 +404,13 @@ DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index c3b5366617..e2c177f56a 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -380,3 +380,14 @@ VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 
1 ... 0 @2_shl_b VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w + +# Narrowing shifts (which only support b and h sizes) +VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b +VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h +VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b +VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h + +VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b +VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h +VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b +VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 24336d1d28..a97942208b 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1324,3 +1324,43 @@ DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK) DO_VSHLL_ALL(vshllb, false) DO_VSHLL_ALL(vshllt, true) + +/* + * Narrowing right shifts, taking a double sized input, shifting it + * and putting the result in either the top or bottom half of the output. + * ESIZE, TYPE are the output, and LESIZE, LTYPE the input. + */ +#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ + void *vm, uint32_t shift) \ + { \ + LTYPE *m = vm; \ + TYPE *d = vd; \ + uint16_t mask = mve_element_mask(env); \ + unsigned le; \ + for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ + TYPE r = FN(m[H##LESIZE(le)], shift); \ + mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VSHRN_ALL(OP, FN) \ + DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \ + DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \ + DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \ + DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN) + +static inline uint64_t do_urshr(uint64_t x, unsigned sh) +{ + if (likely(sh < 64)) { + return (x >> sh) + ((x >> (sh - 1)) & 1); + } else if (sh == 64) { + return x >> 63; + } else { + return 0; + } +} + +DO_VSHRN_ALL(vshrn, DO_SHR) +DO_VSHRN_ALL(vrshrn, do_urshr) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index b031f84966..f1a8f21b77 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -911,3 +911,18 @@ DO_VSHLL(VSHLL_BS, vshllbs) DO_VSHLL(VSHLL_BU, vshllbu) DO_VSHLL(VSHLL_TS, vshllts) DO_VSHLL(VSHLL_TU, vshlltu) + +#define DO_2SHIFT_N(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ + { \ + static MVEGenTwoOpShiftFn * const fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + }; \ + return do_2shift(s, a, fns[a->size], false); \ + } + +DO_2SHIFT_N(VSHRNB, vshrnb) +DO_2SHIFT_N(VSHRNT, vshrnt) +DO_2SHIFT_N(VRSHRNB, vrshrnb) +DO_2SHIFT_N(VRSHRNT, vrshrnt) -- cgit v1.2.3 From d6f9e011e8643fb00303e3fec24dd1e424f3f5b3 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:29 +0100 Subject: target/arm: Implement MVE saturating narrowing shifts Implement the MVE saturating shift-right-and-narrow insns VQSHRN, VQSHRUN, VQRSHRN and VQRSHRUN. do_srshr() is borrowed from sve_helper.c. 
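In essence (a simplified C restatement of the same rounding-and-clamping rule, not the in-tree helpers), each narrowed element is a rounding right shift followed by a clamp to the destination range, with any clamping recorded for the QC flag:

    #include <stdbool.h>
    #include <stdint.h>

    /* Rounding right shift: add back the last bit shifted out.
     * Assumes 1 <= sh <= 63 and an arithmetic >> for signed values. */
    static int64_t rshr_round(int64_t x, unsigned sh)
    {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }

    /* Narrow one 16-bit source element to int8_t with saturation. */
    static int8_t qrshrn_elem(int16_t m, unsigned sh, bool *sat)
    {
        int64_t v = rshr_round(m, sh);
        if (v > INT8_MAX) { *sat = true; return INT8_MAX; }
        if (v < INT8_MIN) { *sat = true; return INT8_MIN; }
        return (int8_t)v;
    }

The real helpers additionally thread the predicate mask through the loop and only fold an element's saturation into QC when its predicate bit is set.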
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-13-peter.maydell@linaro.org --- target/arm/helper-mve.h | 30 +++++++++++++ target/arm/mve.decode | 28 ++++++++++++ target/arm/mve_helper.c | 104 +++++++++++++++++++++++++++++++++++++++++++++ target/arm/translate-mve.c | 12 ++++++ 4 files changed, 174 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 323ac07fa3..96b4c0dfd3 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -414,3 +414,33 @@ DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index e2c177f56a..1d11387bc0 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -391,3 +391,31 @@ VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h + +VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b +VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h +VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b +VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h +VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b +VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 
0 ... 0 @2_shr_h +VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b +VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h + +VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b +VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h +VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b +VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h + +VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b +VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h +VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b +VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h +VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b +VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h +VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b +VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h + +VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b +VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h +VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b +VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index a97942208b..3e736e8909 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1362,5 +1362,109 @@ static inline uint64_t do_urshr(uint64_t x, unsigned sh) } } +static inline int64_t do_srshr(int64_t x, unsigned sh) +{ + if (likely(sh < 64)) { + return (x >> sh) + ((x >> (sh - 1)) & 1); + } else { + /* Rounding the sign bit always produces 0. */ + return 0; + } +} + DO_VSHRN_ALL(vshrn, DO_SHR) DO_VSHRN_ALL(vrshrn, do_urshr) + +static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max, + bool *satp) +{ + if (val > max) { + *satp = true; + return max; + } else if (val < min) { + *satp = true; + return min; + } else { + return val; + } +} + +/* Saturating narrowing right shifts */ +#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ + void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ + void *vm, uint32_t shift) \ + { \ + LTYPE *m = vm; \ + TYPE *d = vd; \ + uint16_t mask = mve_element_mask(env); \ + bool qc = false; \ + unsigned le; \ + for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ + bool sat = false; \ + TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \ + mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ + qc |= sat && (mask & 1 << (TOP * ESIZE)); \ + } \ + if (qc) { \ + env->vfp.qc[0] = qc; \ + } \ + mve_advance_vpt(env); \ + } + +#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \ + DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ + DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) + +#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \ + DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ + DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) + +#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \ + DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ + DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) + +#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \ + DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ + DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) + +#define DO_SHRN_SB(N, M, SATP) \ + do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP) +#define DO_SHRN_UB(N, M, SATP) \ + do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP) +#define DO_SHRUN_B(N, M, SATP) \ + do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP) + +#define DO_SHRN_SH(N, M, SATP) \ + 
do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP) +#define DO_SHRN_UH(N, M, SATP) \ + do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP) +#define DO_SHRUN_H(N, M, SATP) \ + do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP) + +#define DO_RSHRN_SB(N, M, SATP) \ + do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP) +#define DO_RSHRN_UB(N, M, SATP) \ + do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP) +#define DO_RSHRUN_B(N, M, SATP) \ + do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP) + +#define DO_RSHRN_SH(N, M, SATP) \ + do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP) +#define DO_RSHRN_UH(N, M, SATP) \ + do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP) +#define DO_RSHRUN_H(N, M, SATP) \ + do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP) + +DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB) +DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH) +DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB) +DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH) +DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B) +DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H) + +DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB) +DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH) +DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB) +DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH) +DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B) +DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index f1a8f21b77..eef4f1f6ce 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -926,3 +926,15 @@ DO_2SHIFT_N(VSHRNB, vshrnb) DO_2SHIFT_N(VSHRNT, vshrnt) DO_2SHIFT_N(VRSHRNB, vrshrnb) DO_2SHIFT_N(VRSHRNT, vrshrnt) +DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s) +DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s) +DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u) +DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u) +DO_2SHIFT_N(VQSHRUNB, vqshrunb) +DO_2SHIFT_N(VQSHRUNT, vqshrunt) +DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s) +DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s) +DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u) +DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u) +DO_2SHIFT_N(VQRSHRUNB, vqrshrunb) +DO_2SHIFT_N(VQRSHRUNT, vqrshrunt) -- cgit v1.2.3 From 2e6a4ce0f61d4be3d85a5a9e75d1fb39faa23664 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:30 +0100 Subject: target/arm: Implement MVE VSHLC Implement the MVE VSHLC insn, which performs a shift left of the entire vector with carry in bits provided from a general purpose register and carry out bits written back to that register. 
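Conceptually (an illustrative C model that ignores predication and the byte-order macros; assume the encoded imm of 0 has already been mapped to shift == 32):

    #include <stdint.h>

    /* VSHLC on four 32-bit elements: bits shifted out of one element
     * become the carry-in of the next; the final carry-out is returned
     * for write-back to the general-purpose register. 1 <= shift <= 32. */
    static uint32_t vshlc_model(uint32_t d[4], uint32_t rdm, unsigned shift)
    {
        for (int e = 0; e < 4; e++) {
            uint32_t out = (shift == 32) ? d[e] : d[e] >> (32 - shift);
            d[e] = (shift == 32) ? rdm
                                 : (d[e] << shift) | (rdm & ((1u << shift) - 1));
            rdm = out;
        }
        return rdm;
    }

The carry propagates upwards from element 0, which is why the helper below captures each old element value before merging in its shifted replacement.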
Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-14-peter.maydell@linaro.org --- target/arm/helper-mve.h | 2 ++ target/arm/mve.decode | 2 ++ target/arm/mve_helper.c | 38 ++++++++++++++++++++++++++++++++++++++ target/arm/translate-mve.c | 30 ++++++++++++++++++++++++++++++ 4 files changed, 72 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index 96b4c0dfd3..d414b6309d 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -444,3 +444,5 @@ DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 1d11387bc0..914b108c37 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -419,3 +419,5 @@ VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h + +VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 3e736e8909..9d4a07c1c0 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1468,3 +1468,41 @@ DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB) DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH) DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B) DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H) + +uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, + uint32_t shift) +{ + uint32_t *d = vd; + uint16_t mask = mve_element_mask(env); + unsigned e; + uint32_t r; + + /* + * For each 32-bit element, we shift it left, bringing in the + * low 'shift' bits of rdm at the bottom. Bits shifted out at + * the top become the new rdm, if the predicate mask permits. + * The final rdm value is returned to update the register. + * shift == 0 here means "shift by 32 bits". + */ + if (shift == 0) { + for (e = 0; e < 16 / 4; e++, mask >>= 4) { + r = rdm; + if (mask & 1) { + rdm = d[H4(e)]; + } + mergemask(&d[H4(e)], r, mask); + } + } else { + uint32_t shiftmask = MAKE_64BIT_MASK(0, shift); + + for (e = 0; e < 16 / 4; e++, mask >>= 4) { + r = (d[H4(e)] << shift) | (rdm & shiftmask); + if (mask & 1) { + rdm = d[H4(e)] >> (32 - shift); + } + mergemask(&d[H4(e)], r, mask); + } + } + mve_advance_vpt(env); + return rdm; +} diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index eef4f1f6ce..460dff260f 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -938,3 +938,33 @@ DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u) DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u) DO_2SHIFT_N(VQRSHRUNB, vqrshrunb) DO_2SHIFT_N(VQRSHRUNT, vqrshrunt) + +static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a) +{ + /* + * Whole Vector Left Shift with Carry. The carry is taken + * from a general purpose register and written back there. + * An imm of 0 means "shift by 32". 
+ */ + TCGv_ptr qd; + TCGv_i32 rdm; + + if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) { + return false; + } + if (a->rdm == 13 || a->rdm == 15) { + /* CONSTRAINED UNPREDICTABLE: we UNDEF */ + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + qd = mve_qreg_ptr(a->qd); + rdm = load_reg(s, a->rdm); + gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm)); + store_reg(s, a->rdm, rdm); + tcg_temp_free_ptr(qd); + mve_update_eci(s); + return true; +} -- cgit v1.2.3 From d43ebd9dc8a268195dcc8219ced96f9e3bdc4050 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:31 +0100 Subject: target/arm: Implement MVE VADDLV Implement the MVE VADDLV insn; this is similar to VADDV, except that it accumulates 32-bit elements into a 64-bit accumulator stored in a pair of general-purpose registers. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-15-peter.maydell@linaro.org --- target/arm/helper-mve.h | 3 +++ target/arm/mve.decode | 6 ++++- target/arm/mve_helper.c | 19 ++++++++++++++ target/arm/translate-mve.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 90 insertions(+), 1 deletion(-) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index d414b6309d..cf5ba860f2 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -356,6 +356,9 @@ DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32) DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32) +DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64) +DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64) + DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64) DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 914b108c37..595d97568e 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -307,7 +307,11 @@ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar # Vector add across vector -VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo +{ + VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo + VADDLV 111 u:1 1110 1 ... 1001 ... 
0 1111 00 a:1 0 qm:3 0 \ + rdahi=%rdahi rdalo=%rdalo +} # Predicate operations %mask_22_13 22:1 13:3 diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 9d4a07c1c0..37af94bd9e 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1189,6 +1189,25 @@ DO_VADDV(vaddvub, 1, uint8_t) DO_VADDV(vaddvuh, 2, uint16_t) DO_VADDV(vaddvuw, 4, uint32_t) +#define DO_VADDLV(OP, TYPE, LTYPE) \ + uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ + uint64_t ra) \ + { \ + uint16_t mask = mve_element_mask(env); \ + unsigned e; \ + TYPE *m = vm; \ + for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ + if (mask & 1) { \ + ra += (LTYPE)m[H4(e)]; \ + } \ + } \ + mve_advance_vpt(env); \ + return ra; \ + } \ + +DO_VADDLV(vaddlv_s, int32_t, int64_t) +DO_VADDLV(vaddlv_u, uint32_t, uint64_t) + /* Shifts by immediate */ #define DO_2SHIFT(OP, ESIZE, TYPE, FN) \ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index 460dff260f..a2a45036a0 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -790,6 +790,69 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) return true; } +static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) +{ + /* + * Vector Add Long Across Vector: accumulate the 32-bit + * elements of the vector into a 64-bit result stored in + * a pair of general-purpose registers. + * No need to check Qm's bank: it is only 3 bits in decode. + */ + TCGv_ptr qm; + TCGv_i64 rda; + TCGv_i32 rdalo, rdahi; + + if (!dc_isar_feature(aa32_mve, s)) { + return false; + } + /* + * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related + * encoding; rdalo always has bit 0 clear so cannot be 13 or 15. + */ + if (a->rdahi == 13 || a->rdahi == 15) { + return false; + } + if (!mve_eci_check(s) || !vfp_access_check(s)) { + return true; + } + + /* + * This insn is subject to beat-wise execution. Partial execution + * of an A=0 (no-accumulate) insn which does not execute the first + * beat must start with the current value of RdaHi:RdaLo, not zero. + */ + if (a->a || mve_skip_first_beat(s)) { + /* Accumulate input from RdaHi:RdaLo */ + rda = tcg_temp_new_i64(); + rdalo = load_reg(s, a->rdalo); + rdahi = load_reg(s, a->rdahi); + tcg_gen_concat_i32_i64(rda, rdalo, rdahi); + tcg_temp_free_i32(rdalo); + tcg_temp_free_i32(rdahi); + } else { + /* Accumulate starting at zero */ + rda = tcg_const_i64(0); + } + + qm = mve_qreg_ptr(a->qm); + if (a->u) { + gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda); + } else { + gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda); + } + tcg_temp_free_ptr(qm); + + rdalo = tcg_temp_new_i32(); + rdahi = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(rdalo, rda); + tcg_gen_extrh_i64_i32(rdahi, rda); + store_reg(s, a->rdalo, rdalo); + store_reg(s, a->rdahi, rdahi); + tcg_temp_free_i64(rda); + mve_update_eci(s); + return true; +} + static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn) { TCGv_ptr qd; -- cgit v1.2.3 From f4ae6c8cbda8d9b21290e9b8ae21b785ca24aace Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:32 +0100 Subject: target/arm: Implement MVE long shifts by immediate The MVE extension to v8.1M includes some new shift instructions which sit entirely within the non-coprocessor part of the encoding space and which operate only on general-purpose registers. They take up the space which was previously UNPREDICTABLE MOVS and ORRS encodings with Rm == 13 or 15. 
Implement the long shifts by immediate, which perform shifts on a pair of general-purpose registers treated as a 64-bit quantity, with an immediate shift count between 1 and 32. Awkwardly, because the MOVS and ORRS trans functions do not UNDEF for the Rm==13,15 case, we need to explicitly emit code to UNDEF for the cases where v8.1M now requires that. (Trying to change MOVS and ORRS is too difficult, because the functions that generate the code are shared between a dozen different kinds of arithmetic or logical instruction for all A32, T16 and T32 encodings, and for some insns and some encodings Rm==13,15 are valid.) We make the helper functions we need for UQSHLL and SQSHLL take a 32-bit value which the helper casts to int8_t because we'll need these helpers also for the shift-by-register insns, where the shift count might be < 0 or > 32. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-16-peter.maydell@linaro.org --- target/arm/helper-mve.h | 3 ++ target/arm/mve_helper.c | 10 ++++++ target/arm/t32.decode | 28 +++++++++++++++ target/arm/translate.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++++ target/arm/translate.h | 1 + 5 files changed, 132 insertions(+) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index cf5ba860f2..d3ad7411eb 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -449,3 +449,6 @@ DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32) + +DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32) diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 37af94bd9e..7cd359ec9c 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1525,3 +1525,13 @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, mve_advance_vpt(env); return rdm; } + +uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_sqrshl_d(n, (int8_t)shift, false, &env->QF); +} + +uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_uqrshl_d(n, (int8_t)shift, false, &env->QF); +} diff --git a/target/arm/t32.decode b/target/arm/t32.decode index 0f9326c724..d740320a98 100644 --- a/target/arm/t32.decode +++ b/target/arm/t32.decode @@ -48,6 +48,13 @@ &mcr !extern cp opc1 crn crm opc2 rt &mcrr !extern cp opc1 crm rt rt2 +&mve_shl_ri rdalo rdahi shim + +# rdahi: bits [3:1] from insn, bit 0 is 1 +# rdalo: bits [3:1] from insn, bit 0 is 0 +%rdahi_9 9:3 !function=times_2_plus_1 +%rdalo_17 17:3 !function=times_2 + # Data-processing (register) %imm5_12_6 12:3 6:2 @@ -59,12 +66,33 @@ @S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \ &s_rrr_shi shim=%imm5_12_6 s=1 rd=0 +@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \ + &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9 + { TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi } BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi { + # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS + # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE + # cases for MOVS/ORRS. 
We decode the MVE cases first, ensuring that + # they explicitly call unallocated_encoding() for cases that must UNDEF + # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting + # the rest fall through (where ORR_rrri and MOV_rxri will end up + # handling them as r13 and r15 accesses with the same semantics as A32). + [ + LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri + LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri + ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri + + UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri + URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri + SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri + SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri + ] + MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi } diff --git a/target/arm/translate.c b/target/arm/translate.c index 66b24ab56e..e0a481fed9 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -5702,6 +5702,96 @@ static bool trans_MOVT(DisasContext *s, arg_MOVW *a) return true; } +/* + * v8.1M MVE wide-shifts + */ +static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a, + WideShiftImmFn *fn) +{ + TCGv_i64 rda; + TCGv_i32 rdalo, rdahi; + + if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { + /* Decode falls through to ORR/MOV UNPREDICTABLE handling */ + return false; + } + if (a->rdahi == 15) { + /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */ + return false; + } + if (!dc_isar_feature(aa32_mve, s) || + !arm_dc_feature(s, ARM_FEATURE_M_MAIN) || + a->rdahi == 13) { + /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */ + unallocated_encoding(s); + return true; + } + + if (a->shim == 0) { + a->shim = 32; + } + + rda = tcg_temp_new_i64(); + rdalo = load_reg(s, a->rdalo); + rdahi = load_reg(s, a->rdahi); + tcg_gen_concat_i32_i64(rda, rdalo, rdahi); + + fn(rda, rda, a->shim); + + tcg_gen_extrl_i64_i32(rdalo, rda); + tcg_gen_extrh_i64_i32(rdahi, rda); + store_reg(s, a->rdalo, rdalo); + store_reg(s, a->rdahi, rdahi); + tcg_temp_free_i64(rda); + + return true; +} + +static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, tcg_gen_sari_i64); +} + +static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, tcg_gen_shli_i64); +} + +static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, tcg_gen_shri_i64); +} + +static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift) +{ + gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift)); +} + +static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, gen_mve_sqshll); +} + +static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift) +{ + gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift)); +} + +static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, gen_mve_uqshll); +} + +static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, gen_srshr64_i64); +} + +static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a) +{ + return do_mve_shl_ri(s, a, gen_urshr64_i64); +} + /* * Multiply and multiply accumulate */ diff --git a/target/arm/translate.h b/target/arm/translate.h index 4b5db937ef..8e64ee508c 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -464,6 +464,7 @@ typedef void 
CryptoTwoOpFn(TCGv_ptr, TCGv_ptr); typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); +typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift); /** * arm_tbflags_from_tb: -- cgit v1.2.3 From 0aa4b4c358bfced42306de697e6408cabf922cf5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:33 +0100 Subject: target/arm: Implement MVE long shifts by register Implement the MVE long shifts by register, which perform shifts on a pair of general-purpose registers treated as a 64-bit quantity, with the shift count in another general-purpose register, which might be either positive or negative. Like the long-shifts-by-immediate, these encodings sit in the space that was previously the UNPREDICTABLE MOVS/ORRS with Rm==13,15. Because LSLL_rr and ASRL_rr overlap with both MOV_rxri/ORR_rrri and also with CSEL (as one of the previously-UNPREDICTABLE Rm==13 cases), we have to move the CSEL pattern into the same decodetree group. Signed-off-by: Peter Maydell Reviewed-by: Richard Henderson Message-id: 20210628135835.6690-17-peter.maydell@linaro.org --- target/arm/helper-mve.h | 6 ++++ target/arm/mve_helper.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++++ target/arm/t32.decode | 16 +++++++-- target/arm/translate.c | 69 ++++++++++++++++++++++++++++++++++++ target/arm/translate.h | 1 + 5 files changed, 182 insertions(+), 3 deletions(-) diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index d3ad7411eb..7a4316bf8d 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -450,5 +450,11 @@ DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32) DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32) +DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32) DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32) diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 7cd359ec9c..bba150c790 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -1526,6 +1526,16 @@ uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, return rdm; } +uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_sqrshl_d(n, -(int8_t)shift, false, NULL); +} + +uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_uqrshl_d(n, (int8_t)shift, false, NULL); +} + uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift) { return do_sqrshl_d(n, (int8_t)shift, false, &env->QF); @@ -1535,3 +1545,86 @@ uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift) { return do_uqrshl_d(n, (int8_t)shift, false, &env->QF); } + +uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF); +} + +uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_uqrshl_d(n, (int8_t)shift, true, &env->QF); +} + +/* Operate on 64-bit values, but saturate at 48 
bits */ +static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift, + bool round, uint32_t *sat) +{ + if (shift <= -48) { + /* Rounding the sign bit always produces 0. */ + if (round) { + return 0; + } + return src >> 63; + } else if (shift < 0) { + if (round) { + src >>= -shift - 1; + return (src >> 1) + (src & 1); + } + return src >> -shift; + } else if (shift < 48) { + int64_t val = src << shift; + int64_t extval = sextract64(val, 0, 48); + if (!sat || val == extval) { + return extval; + } + } else if (!sat || src == 0) { + return 0; + } + + *sat = 1; + return (1ULL << 47) - (src >= 0); +} + +/* Operate on 64-bit values, but saturate at 48 bits */ +static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift, + bool round, uint32_t *sat) +{ + uint64_t val, extval; + + if (shift <= -(48 + round)) { + return 0; + } else if (shift < 0) { + if (round) { + val = src >> (-shift - 1); + val = (val >> 1) + (val & 1); + } else { + val = src >> -shift; + } + extval = extract64(val, 0, 48); + if (!sat || val == extval) { + return extval; + } + } else if (shift < 48) { + uint64_t val = src << shift; + uint64_t extval = extract64(val, 0, 48); + if (!sat || val == extval) { + return extval; + } + } else if (!sat || src == 0) { + return 0; + } + + *sat = 1; + return MAKE_64BIT_MASK(0, 48); +} + +uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF); +} + +uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift) +{ + return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF); +} diff --git a/target/arm/t32.decode b/target/arm/t32.decode index d740320a98..dc76dee44d 100644 --- a/target/arm/t32.decode +++ b/target/arm/t32.decode @@ -49,6 +49,7 @@ &mcrr !extern cp opc1 crm rt rt2 &mve_shl_ri rdalo rdahi shim +&mve_shl_rr rdalo rdahi rm # rdahi: bits [3:1] from insn, bit 0 is 1 # rdalo: bits [3:1] from insn, bit 0 is 0 @@ -68,6 +69,8 @@ @mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \ &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9 +@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \ + &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9 { TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi @@ -91,10 +94,20 @@ BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri + + LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr + ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr + UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr + SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr + UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr + SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr ] MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi + + # v8.1M CSEL and friends + CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4 } { MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi @@ -118,9 +131,6 @@ SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi } RSB_rrri 1110101 1110 . .... 0 ... .... .... .... 
@s_rrr_shi -# v8.1M CSEL and friends -CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4 - # Data-processing (register-shifted register) MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \ diff --git a/target/arm/translate.c b/target/arm/translate.c index e0a481fed9..f123752431 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -5792,6 +5792,75 @@ static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a) return do_mve_shl_ri(s, a, gen_urshr64_i64); } +static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn) +{ + TCGv_i64 rda; + TCGv_i32 rdalo, rdahi; + + if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) { + /* Decode falls through to ORR/MOV UNPREDICTABLE handling */ + return false; + } + if (a->rdahi == 15) { + /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */ + return false; + } + if (!dc_isar_feature(aa32_mve, s) || + !arm_dc_feature(s, ARM_FEATURE_M_MAIN) || + a->rdahi == 13 || a->rm == 13 || a->rm == 15 || + a->rm == a->rdahi || a->rm == a->rdalo) { + /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */ + unallocated_encoding(s); + return true; + } + + rda = tcg_temp_new_i64(); + rdalo = load_reg(s, a->rdalo); + rdahi = load_reg(s, a->rdahi); + tcg_gen_concat_i32_i64(rda, rdalo, rdahi); + + /* The helper takes care of the sign-extension of the low 8 bits of Rm */ + fn(rda, cpu_env, rda, cpu_R[a->rm]); + + tcg_gen_extrl_i64_i32(rdalo, rda); + tcg_gen_extrh_i64_i32(rdahi, rda); + store_reg(s, a->rdalo, rdalo); + store_reg(s, a->rdahi, rdahi); + tcg_temp_free_i64(rda); + + return true; +} + +static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a) +{ + return do_mve_shl_rr(s, a, gen_helper_mve_ushll); +} + +static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a) +{ + return do_mve_shl_rr(s, a, gen_helper_mve_sshrl); +} + +static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a) +{ + return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll); +} + +static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a) +{ + return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl); +} + +static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a) +{ + return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48); +} + +static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a) +{ + return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48); +} + /* * Multiply and multiply accumulate */ diff --git a/target/arm/translate.h b/target/arm/translate.h index 8e64ee508c..10e9433581 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -465,6 +465,7 @@ typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp); typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift); +typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32); /** * arm_tbflags_from_tb: -- cgit v1.2.3 From 46321d47a91499897bd032361dc24013d70f21a5 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 28 Jun 2021 14:58:34 +0100 Subject: target/arm: Implement MVE shifts by immediate Implement the MVE shifts by immediate, which perform shifts on a single general-purpose register. These patterns overlap with the long-shift-by-immediates, so we have to rearrange the grouping a little here. 
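For example (a behavioural sketch in plain C, not the TCG code), the unsigned rounding shift URSHR on a single register adds back the last bit shifted out, and an encoded immediate of 0 means a full 32-bit shift:

    #include <stdint.h>

    /* URSHR on one 32-bit register; 1 <= sh <= 32, where 32 comes
     * from an encoded immediate of 0. */
    static uint32_t urshr32(uint32_t x, unsigned sh)
    {
        if (sh == 32) {
            return x >> 31;  /* only the rounding bit survives */
        }
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }

A plain shift by 32 would be out of range for a 32-bit operand, which is why gen_urshr32_i32 and gen_srshr32_i32 below gain explicit sh == 32 cases.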
Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 20210628135835.6690-18-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  3 +++
 target/arm/mve_helper.c | 10 ++++++++
 target/arm/t32.decode   | 33 ++++++++++++++++++------
 target/arm/translate.c  | 68 +++++++++++++++++++++++++++++++++++++++++++++++--
 target/arm/translate.h  |  1 +
 5 files changed, 105 insertions(+), 10 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 7a4316bf8d..1fba9d6422 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -458,3 +458,6 @@ DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
 DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index bba150c790..5e60e2a9d8 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1628,3 +1628,13 @@ uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
 {
     return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
 }
+
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index dc76dee44d..1c3406c67a 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -50,6 +50,7 @@
 &mve_shl_ri      rdalo rdahi shim
 &mve_shl_rr      rdalo rdahi rm
+&mve_sh_ri       rda shim
 
 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
 
@@ -71,6 +72,8 @@
              &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
 @mve_shl_rr ....... .... . ... . rm:4 ... . .. .. ....    \
              &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_sh_ri  ....... .... . rda:4 . ... ... . .. .. ....   \
+             &mve_sh_ri shim=%imm5_12_6
 
 {
   TST_xrri  1110101 0000 1 .... 0 ... 1111 .... ....     @S_xrr_shi
@@ -86,14 +89,28 @@ BIC_rrri  1110101 0001 . .... 0 ... .... .... ....     @s_rrr_shi
 # the rest fall through (where ORR_rrri and MOV_rxri will end up
 # handling them as r13 and r15 accesses with the same semantics as A32).
 [
-  LSLL_ri   1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111  @mve_shl_ri
-  LSRL_ri   1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111  @mve_shl_ri
-  ASRL_ri   1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111  @mve_shl_ri
-
-  UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111  @mve_shl_ri
-  URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111  @mve_shl_ri
-  SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111  @mve_shl_ri
-  SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111  @mve_shl_ri
+  {
+    UQSHL_ri   1110101 0010 1 .... 0 ... 1111 .. 00 1111  @mve_sh_ri
+    LSLL_ri    1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
+    UQSHLL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
+  }
+
+  {
+    URSHR_ri   1110101 0010 1 .... 0 ... 1111 .. 01 1111  @mve_sh_ri
+    LSRL_ri    1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+    URSHRL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+  }
+
+  {
+    SRSHR_ri   1110101 0010 1 .... 0 ... 1111 .. 10 1111  @mve_sh_ri
+    ASRL_ri    1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+    SRSHRL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+  }
+
+  {
+    SQSHL_ri   1110101 0010 1 .... 0 ... 1111 .. 11 1111  @mve_sh_ri
+    SQSHLL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+  }
 
   LSLL_rr    1110101 0010 1 ... 0 .... ... 1 0000 1101   @mve_shl_rr
   ASRL_rr    1110101 0010 1 ... 0 .... ... 1 0010 1101   @mve_shl_rr
diff --git a/target/arm/translate.c b/target/arm/translate.c
index f123752431..e38619b571 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3218,8 +3218,14 @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
 
 static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;
 
+    /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
+    if (sh == 32) {
+        tcg_gen_movi_i32(d, 0);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_sari_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -3419,8 +3425,14 @@ static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
 
 static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
 {
-    TCGv_i32 t = tcg_temp_new_i32();
+    TCGv_i32 t;
 
+    /* Handle shift by the input size for the benefit of trans_URSHR_ri */
+    if (sh == 32) {
+        tcg_gen_extract_i32(d, a, sh - 1, 1);
+        return;
+    }
+    t = tcg_temp_new_i32();
     tcg_gen_extract_i32(t, a, sh - 1, 1);
     tcg_gen_shri_i32(d, a, sh);
     tcg_gen_add_i32(d, d, t);
@@ -5861,6 +5873,58 @@ static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
     return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
 }
 
+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15) {
+        /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (a->shim == 0) {
+        a->shim = 32;
+    }
+    fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
+
+    return true;
+}
+
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_urshr32_i32);
+}
+
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_srshr32_i32);
+}
+
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_sqshl);
+}
+
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+    gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+    return do_mve_sh_ri(s, a, gen_mve_uqshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 10e9433581..2c7ca2a1f7 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -466,6 +466,7 @@ typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
 typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
 
 /**
  * arm_tbflags_from_tb:
--
cgit v1.2.3

From 04ea4d3cfd0a21b248ece8eb7a9436a3d9898dd8 Mon Sep 17 00:00:00 2001
From: Peter Maydell
Date: Mon, 28 Jun 2021 14:58:35 +0100
Subject: target/arm: Implement MVE shifts by register

Implement the MVE shifts by register, which perform
shifts on a single general-purpose register.

Signed-off-by: Peter Maydell
Reviewed-by: Richard Henderson
Message-id: 20210628135835.6690-19-peter.maydell@linaro.org
---
 target/arm/helper-mve.h |  2 ++
 target/arm/mve_helper.c | 10 ++++++++++
 target/arm/t32.decode   | 18 ++++++++++++++----
 target/arm/translate.c  | 30 ++++++++++++++++++++++++++++++
 target/arm/translate.h  |  1 +
 5 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 1fba9d6422..56e40844ad 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -461,3 +461,5 @@ DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
 
 DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
 DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 5e60e2a9d8..db5d622085 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1638,3 +1638,13 @@ uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
 {
     return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
 }
+
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
+}
+
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
+}
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index 1c3406c67a..2d47f31f14 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -51,6 +51,7 @@
 &mve_shl_ri      rdalo rdahi shim
 &mve_shl_rr      rdalo rdahi rm
 &mve_sh_ri       rda shim
+&mve_sh_rr       rda rm
 
 # rdahi: bits [3:1] from insn, bit 0 is 1
 # rdalo: bits [3:1] from insn, bit 0 is 0
@@ -74,6 +75,7 @@
              &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
 @mve_sh_ri  ....... .... . rda:4 . ... ... . .. .. ....   \
              &mve_sh_ri shim=%imm5_12_6
+@mve_sh_rr  ....... .... . rda:4 rm:4 .... .... ....      &mve_sh_rr
 
 {
   TST_xrri  1110101 0000 1 .... 0 ... 1111 .... ....     @S_xrr_shi
@@ -112,10 +114,18 @@ BIC_rrri  1110101 0001 . .... 0 ... .... .... ....     @s_rrr_shi
     SQSHLL_ri  1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
   }
 
-  LSLL_rr      1110101 0010 1 ... 0 .... ... 1 0000 1101  @mve_shl_rr
-  ASRL_rr      1110101 0010 1 ... 0 .... ... 1 0010 1101  @mve_shl_rr
-  UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101  @mve_shl_rr
-  SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101  @mve_shl_rr
+  {
+    UQRSHL_rr    1110101 0010 1 .... .... 1111 0000 1101  @mve_sh_rr
+    LSLL_rr      1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
+    UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
+  }
+
+  {
+    SQRSHR_rr    1110101 0010 1 .... .... 1111 0010 1101  @mve_sh_rr
+    ASRL_rr      1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
+    SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+  }
+
   UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101  @mve_shl_rr
   SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101  @mve_shl_rr
 ]
diff --git a/target/arm/translate.c b/target/arm/translate.c
index e38619b571..28e478927d 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5925,6 +5925,36 @@ static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
     return do_mve_sh_ri(s, a, gen_mve_uqshl);
 }
 
+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
+{
+    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+        /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+        return false;
+    }
+    if (!dc_isar_feature(aa32_mve, s) ||
+        !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+        a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
+        a->rm == a->rda) {
+        /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+    fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+    return true;
+}
+
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
+}
+
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+    return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
+}
+
 /*
  * Multiply and multiply accumulate
  */
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 2c7ca2a1f7..241596c5bd 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -467,6 +467,7 @@ typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
 typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
 typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
 typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
 
 /**
  * arm_tbflags_from_tb:
--
cgit v1.2.3
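
A note on the shift-by-immediate patch above: in the MVE scalar shift-by-immediate
encodings an immediate field of 0 encodes a shift by 32 (hence the
"if (a->shim == 0) { a->shim = 32; }" in do_mve_sh_ri()), which is why
gen_srshr32_i32() and gen_urshr32_i32() grow an sh == 32 fast path. The
standalone C sketch below (reference code written for this note, not part of
the patch; the *_ref names are invented) checks both fast paths against a
64-bit definition of the rounding shift: URSHR by 32 reduces to bit 31 of
the input, and SRSHR by 32 is always 0 because x + 2^31 lies in [0, 2^32)
for every int32_t x.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reference rounding unsigned shift right; sh must be in [1, 32]. */
static uint32_t urshr32_ref(uint32_t x, int sh)
{
    /* 64-bit arithmetic, so neither the rounding add nor sh == 32 overflows */
    return (uint32_t)(((uint64_t)x + (1ULL << (sh - 1))) >> sh);
}

/* Reference rounding signed shift right; sh must be in [1, 32]. */
static int32_t srshr32_ref(int32_t x, int sh)
{
    return (int32_t)(((int64_t)x + (1LL << (sh - 1))) >> sh);
}

int main(void)
{
    /* URSHR by 32 yields exactly bit 31 of the input, matching the
     * tcg_gen_extract_i32(d, a, sh - 1, 1) fast path. */
    assert(urshr32_ref(0x80000000u, 32) == 1);
    assert(urshr32_ref(0x7fffffffu, 32) == 0);

    /* SRSHR by 32 always yields 0, matching tcg_gen_movi_i32(d, 0). */
    assert(srshr32_ref(INT32_MIN, 32) == 0);
    assert(srshr32_ref(-1, 32) == 0);
    assert(srshr32_ref(INT32_MAX, 32) == 0);

    printf("rounding-shift special cases hold\n");
    return 0;
}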
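
Likewise for the shift-by-register patch: mve_sqrshr() is implemented as
do_sqrshl_bhs() with a negated shift count, so a single saturating, rounding
left-shift primitive serves both directions, with env->QF as the sticky
saturation flag. Below is a simplified, self-contained approximation of that
primitive's 32-bit rounding variant (our own sketch, not a copy of QEMU's
code; like QEMU, it assumes arithmetic right shift of negative values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Saturating, rounding shift of a signed 32-bit value by a signed amount:
 * a positive shift goes left (and may saturate), a negative shift goes
 * right with rounding. *qc is the sticky saturation flag, standing in for
 * the FPSCR.QC bit that the real helpers update through env->QF.
 */
static int32_t sqrshl32(int32_t src, int shift, bool *qc)
{
    if (shift <= -32) {
        /* Rounding a right shift by the full width (or more) gives 0 */
        return 0;
    } else if (shift < 0) {
        /*
         * Shift right to one bit above the target, then fold the low bit
         * back in as the rounding increment; this avoids the overflow that
         * adding 1 << (-shift - 1) to src directly could cause.
         */
        src >>= -shift - 1;
        return (src >> 1) + (src & 1);
    } else if (shift < 32) {
        int64_t val = (int64_t)src << shift;    /* cannot overflow int64_t */
        if (val == (int32_t)val) {
            return (int32_t)val;                /* exact result, no saturation */
        }
    } else if (src == 0) {
        return 0;                               /* shifting zero never saturates */
    }
    *qc = true;
    return src < 0 ? INT32_MIN : INT32_MAX;
}

int main(void)
{
    bool qc = false;
    printf("%d qc=%d\n", sqrshl32(1000, 4, &qc), qc);        /* 16000 qc=0 */
    printf("%d qc=%d\n", sqrshl32(0x20000000, 4, &qc), qc);  /* 2147483647 qc=1 */
    qc = false;
    /* SQRSHR by 4 is modelled as a shift by -4: 1000 / 16 = 62.5, rounds to 63 */
    printf("%d qc=%d\n", sqrshl32(1000, -4, &qc), qc);       /* 63 qc=0 */
    return 0;
}

The register forms read the shift count from the bottom byte of Rm as a
signed value (hence the (int8_t) casts in the helpers), so UQRSHL shifts
left by Rm while SQRSHR negates it to get the right-shift direction.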