-rw-r--r--  .gitlab-ci.d/base.yml | 2
-rw-r--r--  .gitlab-ci.d/buildtest-template.yml | 10
-rw-r--r--  .gitlab-ci.d/buildtest.yml | 24
-rw-r--r--  .gitlab-ci.d/crossbuild-template.yml | 5
-rw-r--r--  .gitlab-ci.d/crossbuilds.yml | 4
-rw-r--r--  .gitlab-ci.d/windows.yml | 7
-rw-r--r--  MAINTAINERS | 4
-rwxr-xr-x  configure | 8
-rw-r--r--  contrib/gitdm/filetypes.txt | 3
-rw-r--r--  docs/about/deprecated.rst | 6
-rw-r--r--  docs/devel/kconfig.rst | 2
-rw-r--r--  hw/arm/allwinner-a10.c | 1
-rw-r--r--  hw/arm/boot.c | 6
-rw-r--r--  hw/arm/exynos4210.c | 4
-rw-r--r--  hw/arm/mps2-tz.c | 2
-rw-r--r--  hw/arm/mps2.c | 41
-rw-r--r--  hw/arm/musicpal.c | 4
-rw-r--r--  hw/arm/stellaris.c | 11
-rw-r--r--  hw/char/pl011.c | 17
-rw-r--r--  hw/char/xilinx_uartlite.c | 4
-rw-r--r--  hw/core/irq.c | 9
-rw-r--r--  hw/core/or-irq.c | 18
-rw-r--r--  hw/gpio/max7310.c | 5
-rw-r--r--  hw/intc/armv7m_nvic.c | 26
-rw-r--r--  hw/microblaze/petalogix_s3adsp1800_mmu.c | 7
-rw-r--r--  hw/pci-host/mv64361.c | 1
-rw-r--r--  hw/pci-host/raven.c | 2
-rw-r--r--  hw/ppc/pegasos2.c | 1
-rw-r--r--  hw/riscv/boot.c | 97
-rw-r--r--  hw/riscv/microchip_pfsoc.c | 12
-rw-r--r--  hw/riscv/opentitan.c | 4
-rw-r--r--  hw/riscv/sifive_e.c | 4
-rw-r--r--  hw/riscv/sifive_u.c | 12
-rw-r--r--  hw/riscv/spike.c | 14
-rw-r--r--  hw/riscv/virt.c | 12
-rw-r--r--  hw/s390x/pv.c | 28
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 5
-rw-r--r--  hw/scsi/viosrp.h | 3
-rw-r--r--  hw/sensor/dps310.c | 1
-rw-r--r--  hw/sh4/sh7750_regs.h | 3
-rw-r--r--  hw/vfio/ccw.c | 40
-rw-r--r--  include/exec/cpu-defs.h | 6
-rw-r--r--  include/hw/arm/allwinner-a10.h | 2
-rw-r--r--  include/hw/arm/armsse.h | 6
-rw-r--r--  include/hw/arm/bcm2835_peripherals.h | 2
-rw-r--r--  include/hw/arm/exynos4210.h | 4
-rw-r--r--  include/hw/arm/raspi_platform.h | 3
-rw-r--r--  include/hw/arm/stm32f205_soc.h | 2
-rw-r--r--  include/hw/arm/stm32f405_soc.h | 2
-rw-r--r--  include/hw/arm/xlnx-versal.h | 6
-rw-r--r--  include/hw/arm/xlnx-zynqmp.h | 2
-rw-r--r--  include/hw/char/cmsdk-apb-uart.h | 34
-rw-r--r--  include/hw/char/pl011.h | 36
-rw-r--r--  include/hw/char/xilinx_uartlite.h | 22
-rw-r--r--  include/hw/or-irq.h | 5
-rw-r--r--  include/hw/riscv/boot.h | 3
-rw-r--r--  include/hw/s390x/pv.h | 2
-rw-r--r--  include/hw/ssi/ibex_spi_host.h | 1
-rw-r--r--  include/hw/timer/cmsdk-apb-timer.h | 1
-rw-r--r--  include/hw/tricore/tricore_testdevice.h | 1
-rw-r--r--  include/qemu/uri.h | 3
-rw-r--r--  iothread.c | 4
-rw-r--r--  pc-bios/keymaps/meson.build | 2
-rw-r--r--  qemu-keymap.c | 2
-rw-r--r--  softmmu/vl.c | 1
-rw-r--r--  target/arm/arm-powerctl.c | 7
-rw-r--r--  target/arm/cpu.c | 9
-rw-r--r--  target/arm/debug_helper.c | 490
-rw-r--r--  target/arm/helper.c | 411
-rw-r--r--  target/arm/internals.h | 23
-rw-r--r--  target/arm/machine.c | 12
-rw-r--r--  target/arm/meson.build | 46
-rw-r--r--  target/arm/ptw.c | 4
-rw-r--r--  target/arm/tcg-stubs.c | 27
-rw-r--r--  target/arm/tcg/a32-uncond.decode (renamed from target/arm/a32-uncond.decode) | 0
-rw-r--r--  target/arm/tcg/a32.decode (renamed from target/arm/a32.decode) | 0
-rw-r--r--  target/arm/tcg/crypto_helper.c (renamed from target/arm/crypto_helper.c) | 0
-rw-r--r--  target/arm/tcg/helper-a64.c (renamed from target/arm/helper-a64.c) | 0
-rw-r--r--  target/arm/tcg/hflags.c | 403
-rw-r--r--  target/arm/tcg/iwmmxt_helper.c (renamed from target/arm/iwmmxt_helper.c) | 0
-rw-r--r--  target/arm/tcg/m-nocp.decode (renamed from target/arm/m-nocp.decode) | 0
-rw-r--r--  target/arm/tcg/m_helper.c (renamed from target/arm/m_helper.c) | 0
-rw-r--r--  target/arm/tcg/meson.build | 50
-rw-r--r--  target/arm/tcg/mte_helper.c (renamed from target/arm/mte_helper.c) | 0
-rw-r--r--  target/arm/tcg/mve.decode (renamed from target/arm/mve.decode) | 0
-rw-r--r--  target/arm/tcg/mve_helper.c (renamed from target/arm/mve_helper.c) | 0
-rw-r--r--  target/arm/tcg/neon-dp.decode (renamed from target/arm/neon-dp.decode) | 0
-rw-r--r--  target/arm/tcg/neon-ls.decode (renamed from target/arm/neon-ls.decode) | 0
-rw-r--r--  target/arm/tcg/neon-shared.decode (renamed from target/arm/neon-shared.decode) | 0
-rw-r--r--  target/arm/tcg/neon_helper.c (renamed from target/arm/neon_helper.c) | 0
-rw-r--r--  target/arm/tcg/op_helper.c (renamed from target/arm/op_helper.c) | 0
-rw-r--r--  target/arm/tcg/pauth_helper.c (renamed from target/arm/pauth_helper.c) | 0
-rw-r--r--  target/arm/tcg/psci.c (renamed from target/arm/psci.c) | 0
-rw-r--r--  target/arm/tcg/sme-fa64.decode (renamed from target/arm/sme-fa64.decode) | 0
-rw-r--r--  target/arm/tcg/sme.decode (renamed from target/arm/sme.decode) | 0
-rw-r--r--  target/arm/tcg/sme_helper.c (renamed from target/arm/sme_helper.c) | 0
-rw-r--r--  target/arm/tcg/sve.decode (renamed from target/arm/sve.decode) | 0
-rw-r--r--  target/arm/tcg/sve_helper.c (renamed from target/arm/sve_helper.c) | 0
-rw-r--r--  target/arm/tcg/t16.decode (renamed from target/arm/t16.decode) | 0
-rw-r--r--  target/arm/tcg/t32.decode (renamed from target/arm/t32.decode) | 0
-rw-r--r--  target/arm/tcg/tlb_helper.c (renamed from target/arm/tlb_helper.c) | 18
-rw-r--r--  target/arm/tcg/translate-a64.c (renamed from target/arm/translate-a64.c) | 0
-rw-r--r--  target/arm/tcg/translate-a64.h (renamed from target/arm/translate-a64.h) | 0
-rw-r--r--  target/arm/tcg/translate-m-nocp.c (renamed from target/arm/translate-m-nocp.c) | 0
-rw-r--r--  target/arm/tcg/translate-mve.c (renamed from target/arm/translate-mve.c) | 0
-rw-r--r--  target/arm/tcg/translate-neon.c (renamed from target/arm/translate-neon.c) | 0
-rw-r--r--  target/arm/tcg/translate-sme.c (renamed from target/arm/translate-sme.c) | 0
-rw-r--r--  target/arm/tcg/translate-sve.c (renamed from target/arm/translate-sve.c) | 0
-rw-r--r--  target/arm/tcg/translate-vfp.c (renamed from target/arm/translate-vfp.c) | 0
-rw-r--r--  target/arm/tcg/translate.c (renamed from target/arm/translate.c) | 0
-rw-r--r--  target/arm/tcg/translate.h (renamed from target/arm/translate.h) | 0
-rw-r--r--  target/arm/tcg/vec_helper.c (renamed from target/arm/vec_helper.c) | 0
-rw-r--r--  target/arm/tcg/vec_internal.h (renamed from target/arm/vec_internal.h) | 0
-rw-r--r--  target/arm/tcg/vfp-uncond.decode (renamed from target/arm/vfp-uncond.decode) | 0
-rw-r--r--  target/arm/tcg/vfp.decode (renamed from target/arm/vfp.decode) | 0
-rw-r--r--  target/riscv/cpu.c | 2
-rw-r--r--  target/riscv/cpu_helper.c | 2
-rw-r--r--  target/riscv/csr.c | 21
-rw-r--r--  target/riscv/pmp.c | 9
-rw-r--r--  target/riscv/vector_helper.c | 4
-rw-r--r--  target/s390x/arch_dump.c | 22
-rw-r--r--  target/s390x/tcg/mem_helper.c | 287
-rw-r--r--  target/s390x/tcg/translate.c | 480
-rw-r--r--  target/s390x/tcg/translate_vx.c.inc | 45
-rw-r--r--  tests/avocado/version.py | 1
-rwxr-xr-x  tests/qemu-iotests/022 | 4
-rw-r--r--  tests/qtest/rtl8139-test.c | 15
-rw-r--r--  tests/tcg/s390x/Makefile.softmmu-target | 2
-rw-r--r--  tests/tcg/s390x/bal.S | 24
-rw-r--r--  tests/tcg/s390x/sam.S | 67
-rw-r--r--  tests/unit/rcutorture.c | 3
-rw-r--r--  tests/unit/test-rcu-list.c | 3
-rw-r--r--  util/uri.c | 3
133 files changed, 1543 insertions, 1572 deletions
diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml
index 50fb59e147..0274228de8 100644
--- a/.gitlab-ci.d/base.yml
+++ b/.gitlab-ci.d/base.yml
@@ -11,6 +11,8 @@
# and show the duration of each line.
FF_SCRIPT_SECTIONS: 1
+ interruptible: true
+
rules:
#############################################################
# Stage 1: exclude scenarios where we definitely don't
diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml
index 73ecfabb8d..cb96b55c3f 100644
--- a/.gitlab-ci.d/buildtest-template.yml
+++ b/.gitlab-ci.d/buildtest-template.yml
@@ -11,12 +11,10 @@
fi
- mkdir build
- cd build
- - if test -n "$TARGETS";
- then
- ../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS --target-list="$TARGETS" ;
- else
- ../configure --enable-werror --disable-docs ${LD_JOBS:+--meson=git} $CONFIGURE_ARGS ;
- fi || { cat config.log meson-logs/meson-log.txt && exit 1; }
+ - ../configure --enable-werror --disable-docs --enable-fdt=system
+ ${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"}
+ $CONFIGURE_ARGS ||
+ { cat config.log meson-logs/meson-log.txt && exit 1; }
- if test -n "$LD_JOBS";
then
../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml
index 8f332fc36f..d903c42798 100644
--- a/.gitlab-ci.d/buildtest.yml
+++ b/.gitlab-ci.d/buildtest.yml
@@ -41,8 +41,8 @@ build-system-ubuntu:
job: amd64-ubuntu2004-container
variables:
IMAGE: ubuntu2004
- CONFIGURE_ARGS: --enable-docs --enable-fdt=system --enable-capstone
- TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu
+ CONFIGURE_ARGS: --enable-docs
+ TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
microblazeel-softmmu mips64el-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
@@ -74,6 +74,7 @@ build-system-debian:
job: amd64-debian-container
variables:
IMAGE: debian-amd64
+ CONFIGURE_ARGS: --with-coroutine=sigaltstack
TARGETS: arm-softmmu avr-softmmu i386-softmmu mipsel-softmmu
riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensaeb-softmmu
MAKE_CHECK_ARGS: check-build
@@ -119,7 +120,6 @@ build-system-fedora:
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
- --enable-fdt=system --enable-slirp --enable-capstone
TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
MAKE_CHECK_ARGS: check-build
@@ -165,9 +165,8 @@ build-system-centos:
job: amd64-centos8-container
variables:
IMAGE: centos8
- CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
+ CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
--enable-modules --enable-trace-backends=dtrace --enable-docs
- --enable-vfio-user-server
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
@@ -200,7 +199,6 @@ build-system-opensuse:
job: amd64-opensuse-leap-container
variables:
IMAGE: opensuse-leap
- CONFIGURE_ARGS: --enable-fdt=system
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
@@ -463,7 +461,7 @@ tsan-build:
variables:
IMAGE: ubuntu2004
CONFIGURE_ARGS: --enable-tsan --cc=clang-10 --cxx=clang++-10
- --enable-trace-backends=ust --enable-fdt=system --disable-slirp
+ --enable-trace-backends=ust --disable-slirp
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
MAKE_CHECK_ARGS: bench V=1
@@ -534,18 +532,6 @@ build-tci:
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
- make check-tcg
-# Alternate coroutines implementations are only really of interest to KVM users
-# However we can't test against KVM on Gitlab-CI so we can only run unit tests
-build-coroutine-sigaltstack:
- extends: .native_build_job_template
- needs:
- job: amd64-ubuntu2004-container
- variables:
- IMAGE: ubuntu2004
- CONFIGURE_ARGS: --with-coroutine=sigaltstack --disable-tcg
- --enable-trace-backends=ftrace
- MAKE_CHECK_ARGS: check-unit
-
# Check our reduced build configurations
build-without-defaults:
extends: .native_build_job_template
diff --git a/.gitlab-ci.d/crossbuild-template.yml b/.gitlab-ci.d/crossbuild-template.yml
index 6d709628f1..d07989e3b0 100644
--- a/.gitlab-ci.d/crossbuild-template.yml
+++ b/.gitlab-ci.d/crossbuild-template.yml
@@ -6,8 +6,9 @@
script:
- mkdir build
- cd build
- - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
- --disable-user --target-list-exclude="arm-softmmu cris-softmmu
+ - ../configure --enable-werror --disable-docs --enable-fdt=system
+ --disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
+ --target-list-exclude="arm-softmmu cris-softmmu
i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml
index 74d6259b90..101416080c 100644
--- a/.gitlab-ci.d/crossbuilds.yml
+++ b/.gitlab-ci.d/crossbuilds.yml
@@ -159,7 +159,7 @@ cross-s390x-kvm-only:
job: s390x-debian-cross-container
variables:
IMAGE: debian-s390x-cross
- EXTRA_CONFIGURE_OPTS: --disable-tcg
+ EXTRA_CONFIGURE_OPTS: --disable-tcg --enable-trace-backends=ftrace
cross-mips64el-kvm-only:
extends: .cross_accel_build_job
@@ -175,6 +175,7 @@ cross-win32-system:
job: win32-fedora-cross-container
variables:
IMAGE: fedora-win32-cross
+ EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
microblazeel-softmmu mips64el-softmmu nios2-softmmu
artifacts:
@@ -187,6 +188,7 @@ cross-win64-system:
job: win64-fedora-cross-container
variables:
IMAGE: fedora-win64-cross
+ EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
m68k-softmmu microblazeel-softmmu nios2-softmmu
or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml
index cf445b77f6..87235e43b4 100644
--- a/.gitlab-ci.d/windows.yml
+++ b/.gitlab-ci.d/windows.yml
@@ -38,6 +38,7 @@ msys2-64bit:
mingw-w64-x86_64-capstone
mingw-w64-x86_64-curl
mingw-w64-x86_64-cyrus-sasl
+ mingw-w64-x86_64-dtc
mingw-w64-x86_64-gcc
mingw-w64-x86_64-glib2
mingw-w64-x86_64-gnutls
@@ -71,7 +72,7 @@ msys2-64bit:
# for the msys2 64-bit job, due to the build could not complete within
# the project timeout.
- ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
- --without-default-devices'
+ --without-default-devices --enable-fdt=system'
- ..\msys64\usr\bin\bash -lc 'make'
# qTests don't run successfully with "--without-default-devices",
# so let's exclude the qtests from CI for now.
@@ -86,6 +87,7 @@ msys2-32bit:
mingw-w64-i686-capstone
mingw-w64-i686-curl
mingw-w64-i686-cyrus-sasl
+ mingw-w64-i686-dtc
mingw-w64-i686-gcc
mingw-w64-i686-glib2
mingw-w64-i686-gnutls
@@ -113,7 +115,8 @@ msys2-32bit:
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
- mkdir output
- cd output
- - ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu'
+ - ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu
+ --enable-fdt=system'
- ..\msys64\usr\bin\bash -lc 'make'
- ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
{ cat meson-logs/testlog.txt; exit 1; }'
diff --git a/MAINTAINERS b/MAINTAINERS
index 896e411918..6db664825d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,6 +161,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: target/arm/
+F: target/arm/tcg/
F: tests/tcg/arm/
F: tests/tcg/aarch64/
F: tests/qtest/arm-cpu-features.c
@@ -287,6 +288,9 @@ RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Alistair Francis <alistair.francis@wdc.com>
M: Bin Meng <bin.meng@windriver.com>
+R: Weiwei Li <liweiwei@iscas.ac.cn>
+R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
+R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
L: qemu-riscv@nongnu.org
S: Supported
F: target/riscv/
diff --git a/configure b/configure
index 0e41c5e36c..2a8a9be8a1 100755
--- a/configure
+++ b/configure
@@ -31,8 +31,12 @@ then
fi
fi
- mkdir build
- touch $MARKER
+ if ! mkdir build || ! touch $MARKER
+ then
+ echo "ERROR: Could not create ./build directory. Check the permissions on"
+ echo "your source directory, or try doing an out-of-tree build."
+ exit 1
+ fi
cat > GNUmakefile <<'EOF'
# This file is auto-generated by configure to support in-source tree
diff --git a/contrib/gitdm/filetypes.txt b/contrib/gitdm/filetypes.txt
index d2d6f6db8d..b1d01c0992 100644
--- a/contrib/gitdm/filetypes.txt
+++ b/contrib/gitdm/filetypes.txt
@@ -12,8 +12,7 @@
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Authors : Gregorio Robles <grex@gsyc.escet.urjc.es>
# Authors : Germán Póo-Caamaño <gpoo@gnome.org>
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index ee95bcb1a6..15084f7bea 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -99,6 +99,12 @@ form is preferred.
The HPET setting has been turned into a machine property.
Use ``-machine hpet=off`` instead.
+``-no-acpi`` (since 8.0)
+''''''''''''''''''''''''
+
+The ``-no-acpi`` setting has been turned into a machine property.
+Use ``-machine acpi=off`` instead.
+
``-accel hax`` (since 8.0)
''''''''''''''''''''''''''
diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst
index 69674d008a..cc1a456edf 100644
--- a/docs/devel/kconfig.rst
+++ b/docs/devel/kconfig.rst
@@ -306,6 +306,6 @@ variable::
host_kconfig = \
(have_tpm ? ['CONFIG_TPM=y'] : []) + \
- ('CONFIG_SPICE' in config_host ? ['CONFIG_SPICE=y'] : []) + \
+ ('CONFIG_LINUX' in config_host ? ['CONFIG_LINUX=y'] : []) + \
(have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \
...
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c
index dc1966ff7a..b7ca795c71 100644
--- a/hw/arm/allwinner-a10.c
+++ b/hw/arm/allwinner-a10.c
@@ -18,6 +18,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
+#include "hw/char/serial.h"
#include "hw/sysbus.h"
#include "hw/arm/allwinner-a10.h"
#include "hw/misc/unimp.h"
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 3d7d11f782..1e021c4a34 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -15,6 +15,7 @@
#include "hw/arm/boot.h"
#include "hw/arm/linux-boot-if.h"
#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
#include "sysemu/sysemu.h"
#include "sysemu/numa.h"
#include "hw/boards.h"
@@ -827,7 +828,10 @@ static void do_cpu_reset(void *opaque)
info->secondary_cpu_reset_hook(cpu, info);
}
}
- arm_rebuild_hflags(env);
+
+ if (tcg_enabled()) {
+ arm_rebuild_hflags(env);
+ }
}
}
diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c
index 8dafa2215b..6f2dda13f6 100644
--- a/hw/arm/exynos4210.c
+++ b/hw/arm/exynos4210.c
@@ -507,7 +507,7 @@ static uint64_t exynos4210_calc_affinity(int cpu)
return (0x9 << ARM_AFF1_SHIFT) | cpu;
}
-static DeviceState *pl330_create(uint32_t base, qemu_or_irq *orgate,
+static DeviceState *pl330_create(uint32_t base, OrIRQState *orgate,
qemu_irq irq, int nreq, int nevents, int width)
{
SysBusDevice *busdev;
@@ -806,7 +806,7 @@ static void exynos4210_init(Object *obj)
for (i = 0; i < ARRAY_SIZE(s->pl330_irq_orgate); i++) {
char *name = g_strdup_printf("pl330-irq-orgate%d", i);
- qemu_or_irq *orgate = &s->pl330_irq_orgate[i];
+ OrIRQState *orgate = &s->pl330_irq_orgate[i];
object_initialize_child(obj, name, orgate, TYPE_OR_IRQ);
g_free(name);
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index 284c09c91d..07aecd9497 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -152,7 +152,7 @@ struct MPS2TZMachineState {
TZMSC msc[4];
CMSDKAPBUART uart[6];
SplitIRQ sec_resp_splitter;
- qemu_or_irq uart_irq_orgate;
+ OrIRQState uart_irq_orgate;
DeviceState *lan9118;
SplitIRQ cpu_irq_splitter[MPS2TZ_NUMIRQ_MAX];
Clock *sysclk;
diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c
index a86a994dba..d92fd60684 100644
--- a/hw/arm/mps2.c
+++ b/hw/arm/mps2.c
@@ -35,6 +35,7 @@
#include "hw/boards.h"
#include "exec/address-spaces.h"
#include "sysemu/sysemu.h"
+#include "hw/qdev-properties.h"
#include "hw/misc/unimp.h"
#include "hw/char/cmsdk-apb-uart.h"
#include "hw/timer/cmsdk-apb-timer.h"
@@ -282,6 +283,9 @@ static void mps2_common_init(MachineState *machine)
qdev_connect_gpio_out(orgate_dev, 0, qdev_get_gpio_in(armv7m, 12));
for (i = 0; i < 5; i++) {
+ DeviceState *dev;
+ SysBusDevice *s;
+
static const hwaddr uartbase[] = {0x40004000, 0x40005000,
0x40006000, 0x40007000,
0x40009000};
@@ -294,12 +298,16 @@ static void mps2_common_init(MachineState *machine)
rxovrint = qdev_get_gpio_in(orgate_dev, i * 2 + 1);
}
- cmsdk_apb_uart_create(uartbase[i],
- qdev_get_gpio_in(armv7m, uartirq[i] + 1),
- qdev_get_gpio_in(armv7m, uartirq[i]),
- txovrint, rxovrint,
- NULL,
- serial_hd(i), SYSCLK_FRQ);
+ dev = qdev_new(TYPE_CMSDK_APB_UART);
+ s = SYS_BUS_DEVICE(dev);
+ qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+ qdev_prop_set_uint32(dev, "pclk-frq", SYSCLK_FRQ);
+ sysbus_realize_and_unref(s, &error_fatal);
+ sysbus_mmio_map(s, 0, uartbase[i]);
+ sysbus_connect_irq(s, 0, qdev_get_gpio_in(armv7m, uartirq[i] + 1));
+ sysbus_connect_irq(s, 1, qdev_get_gpio_in(armv7m, uartirq[i]));
+ sysbus_connect_irq(s, 2, txovrint);
+ sysbus_connect_irq(s, 3, rxovrint);
}
break;
}
@@ -324,7 +332,8 @@ static void mps2_common_init(MachineState *machine)
0x4002c000, 0x4002d000,
0x4002e000};
Object *txrx_orgate;
- DeviceState *txrx_orgate_dev;
+ DeviceState *txrx_orgate_dev, *dev;
+ SysBusDevice *s;
txrx_orgate = object_new(TYPE_OR_IRQ);
object_property_set_int(txrx_orgate, "num-lines", 2, &error_fatal);
@@ -332,13 +341,17 @@ static void mps2_common_init(MachineState *machine)
txrx_orgate_dev = DEVICE(txrx_orgate);
qdev_connect_gpio_out(txrx_orgate_dev, 0,
qdev_get_gpio_in(armv7m, uart_txrx_irqno[i]));
- cmsdk_apb_uart_create(uartbase[i],
- qdev_get_gpio_in(txrx_orgate_dev, 0),
- qdev_get_gpio_in(txrx_orgate_dev, 1),
- qdev_get_gpio_in(orgate_dev, i * 2),
- qdev_get_gpio_in(orgate_dev, i * 2 + 1),
- NULL,
- serial_hd(i), SYSCLK_FRQ);
+
+ dev = qdev_new(TYPE_CMSDK_APB_UART);
+ s = SYS_BUS_DEVICE(dev);
+ qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+ qdev_prop_set_uint32(dev, "pclk-frq", SYSCLK_FRQ);
+ sysbus_realize_and_unref(s, &error_fatal);
+ sysbus_mmio_map(s, 0, uartbase[i]);
+ sysbus_connect_irq(s, 0, qdev_get_gpio_in(txrx_orgate_dev, 0));
+ sysbus_connect_irq(s, 1, qdev_get_gpio_in(txrx_orgate_dev, 1));
+ sysbus_connect_irq(s, 2, qdev_get_gpio_in(orgate_dev, i * 2));
+ sysbus_connect_irq(s, 3, qdev_get_gpio_in(orgate_dev, i * 2 + 1));
}
break;
}
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index 89b66606c3..06d9add7c7 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -1072,7 +1072,6 @@ struct musicpal_key_state {
SysBusDevice parent_obj;
/*< public >*/
- MemoryRegion iomem;
uint32_t kbd_extended;
uint32_t pressed_keys;
qemu_irq out[8];
@@ -1161,9 +1160,6 @@ static void musicpal_key_init(Object *obj)
DeviceState *dev = DEVICE(sbd);
musicpal_key_state *s = MUSICPAL_KEY(dev);
- memory_region_init(&s->iomem, obj, "dummy", 0);
- sysbus_init_mmio(sbd, &s->iomem);
-
s->kbd_extended = 0;
s->pressed_keys = 0;
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
index 67a2293d35..f7e99baf62 100644
--- a/hw/arm/stellaris.c
+++ b/hw/arm/stellaris.c
@@ -1146,9 +1146,14 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
for (i = 0; i < 4; i++) {
if (board->dc2 & (1 << i)) {
- pl011_luminary_create(0x4000c000 + i * 0x1000,
- qdev_get_gpio_in(nvic, uart_irq[i]),
- serial_hd(i));
+ SysBusDevice *sbd;
+
+ dev = qdev_new("pl011_luminary");
+ sbd = SYS_BUS_DEVICE(dev);
+ qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+ sysbus_realize_and_unref(sbd, &error_fatal);
+ sysbus_mmio_map(sbd, 0, 0x4000c000 + i * 0x1000);
+ sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(nvic, uart_irq[i]));
}
}
if (board->dc2 & (1 << 4)) {
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index c15cb7af20..77bbc2a982 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -19,10 +19,12 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/char/pl011.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "hw/qdev-clock.h"
+#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "chardev/char-fe.h"
@@ -31,6 +33,21 @@
#include "qemu/module.h"
#include "trace.h"
+DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+
+ dev = qdev_new("pl011");
+ s = SYS_BUS_DEVICE(dev);
+ qdev_prop_set_chr(dev, "chardev", chr);
+ sysbus_realize_and_unref(s, &error_fatal);
+ sysbus_mmio_map(s, 0, addr);
+ sysbus_connect_irq(s, 0, irq);
+
+ return dev;
+}
+
#define PL011_INT_TX 0x20
#define PL011_INT_RX 0x10
diff --git a/hw/char/xilinx_uartlite.c b/hw/char/xilinx_uartlite.c
index 99b9a6f851..180bb97202 100644
--- a/hw/char/xilinx_uartlite.c
+++ b/hw/char/xilinx_uartlite.c
@@ -24,6 +24,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "hw/char/xilinx_uartlite.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
@@ -53,9 +54,6 @@
#define CONTROL_RST_RX 0x02
#define CONTROL_IE 0x10
-#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite"
-OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE)
-
struct XilinxUARTLite {
SysBusDevice parent_obj;
diff --git a/hw/core/irq.c b/hw/core/irq.c
index 3623f711fe..3f14e2dda7 100644
--- a/hw/core/irq.c
+++ b/hw/core/irq.c
@@ -26,8 +26,7 @@
#include "hw/irq.h"
#include "qom/object.h"
-DECLARE_INSTANCE_CHECKER(struct IRQState, IRQ,
- TYPE_IRQ)
+OBJECT_DECLARE_SIMPLE_TYPE(IRQState, IRQ)
struct IRQState {
Object parent_obj;
@@ -68,7 +67,7 @@ qemu_irq *qemu_allocate_irqs(qemu_irq_handler handler, void *opaque, int n)
qemu_irq qemu_allocate_irq(qemu_irq_handler handler, void *opaque, int n)
{
- struct IRQState *irq;
+ IRQState *irq;
irq = IRQ(object_new(TYPE_IRQ));
irq->handler = handler;
@@ -94,7 +93,7 @@ void qemu_free_irq(qemu_irq irq)
static void qemu_notirq(void *opaque, int line, int level)
{
- struct IRQState *irq = opaque;
+ IRQState *irq = opaque;
irq->handler(irq->opaque, irq->n, !level);
}
@@ -120,7 +119,7 @@ void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n)
static const TypeInfo irq_type_info = {
.name = TYPE_IRQ,
.parent = TYPE_OBJECT,
- .instance_size = sizeof(struct IRQState),
+ .instance_size = sizeof(IRQState),
};
static void irq_register_types(void)
diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c
index d8f3754e96..1df4bc05a7 100644
--- a/hw/core/or-irq.c
+++ b/hw/core/or-irq.c
@@ -31,7 +31,7 @@
static void or_irq_handler(void *opaque, int n, int level)
{
- qemu_or_irq *s = OR_IRQ(opaque);
+ OrIRQState *s = OR_IRQ(opaque);
int or_level = 0;
int i;
@@ -46,7 +46,7 @@ static void or_irq_handler(void *opaque, int n, int level)
static void or_irq_reset(DeviceState *dev)
{
- qemu_or_irq *s = OR_IRQ(dev);
+ OrIRQState *s = OR_IRQ(dev);
int i;
for (i = 0; i < MAX_OR_LINES; i++) {
@@ -56,7 +56,7 @@ static void or_irq_reset(DeviceState *dev)
static void or_irq_realize(DeviceState *dev, Error **errp)
{
- qemu_or_irq *s = OR_IRQ(dev);
+ OrIRQState *s = OR_IRQ(dev);
assert(s->num_lines <= MAX_OR_LINES);
@@ -65,7 +65,7 @@ static void or_irq_realize(DeviceState *dev, Error **errp)
static void or_irq_init(Object *obj)
{
- qemu_or_irq *s = OR_IRQ(obj);
+ OrIRQState *s = OR_IRQ(obj);
qdev_init_gpio_out(DEVICE(obj), &s->out_irq, 1);
}
@@ -84,7 +84,7 @@ static void or_irq_init(Object *obj)
static bool vmstate_extras_needed(void *opaque)
{
- qemu_or_irq *s = OR_IRQ(opaque);
+ OrIRQState *s = OR_IRQ(opaque);
return s->num_lines >= OLD_MAX_OR_LINES;
}
@@ -95,7 +95,7 @@ static const VMStateDescription vmstate_or_irq_extras = {
.minimum_version_id = 1,
.needed = vmstate_extras_needed,
.fields = (VMStateField[]) {
- VMSTATE_VARRAY_UINT16_UNSAFE(levels, qemu_or_irq, num_lines, 0,
+ VMSTATE_VARRAY_UINT16_UNSAFE(levels, OrIRQState, num_lines, 0,
vmstate_info_bool, bool),
VMSTATE_END_OF_LIST(),
},
@@ -106,7 +106,7 @@ static const VMStateDescription vmstate_or_irq = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_BOOL_SUB_ARRAY(levels, qemu_or_irq, 0, OLD_MAX_OR_LINES),
+ VMSTATE_BOOL_SUB_ARRAY(levels, OrIRQState, 0, OLD_MAX_OR_LINES),
VMSTATE_END_OF_LIST(),
},
.subsections = (const VMStateDescription*[]) {
@@ -116,7 +116,7 @@ static const VMStateDescription vmstate_or_irq = {
};
static Property or_irq_properties[] = {
- DEFINE_PROP_UINT16("num-lines", qemu_or_irq, num_lines, 1),
+ DEFINE_PROP_UINT16("num-lines", OrIRQState, num_lines, 1),
DEFINE_PROP_END_OF_LIST(),
};
@@ -136,7 +136,7 @@ static void or_irq_class_init(ObjectClass *klass, void *data)
static const TypeInfo or_irq_type_info = {
.name = TYPE_OR_IRQ,
.parent = TYPE_DEVICE,
- .instance_size = sizeof(qemu_or_irq),
+ .instance_size = sizeof(OrIRQState),
.instance_init = or_irq_init,
.class_init = or_irq_class_init,
};
diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c
index db6b5e3d76..031482d939 100644
--- a/hw/gpio/max7310.c
+++ b/hw/gpio/max7310.c
@@ -183,11 +183,10 @@ static void max7310_gpio_set(void *opaque, int line, int level)
* but also accepts sequences that are not SMBus so return an I2C device. */
static void max7310_realize(DeviceState *dev, Error **errp)
{
- I2CSlave *i2c = I2C_SLAVE(dev);
MAX7310State *s = MAX7310(dev);
- qdev_init_gpio_in(&i2c->qdev, max7310_gpio_set, 8);
- qdev_init_gpio_out(&i2c->qdev, s->handler, 8);
+ qdev_init_gpio_in(dev, max7310_gpio_set, ARRAY_SIZE(s->handler));
+ qdev_init_gpio_out(dev, s->handler, ARRAY_SIZE(s->handler));
}
static void max7310_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index e54553283f..63afe1fdf5 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -18,6 +18,7 @@
#include "hw/intc/armv7m_nvic.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
+#include "sysemu/tcg.h"
#include "sysemu/runstate.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
@@ -577,7 +578,7 @@ static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
* which saves having to have an extra argument is_terminal
* that we'd only use in one place.
*/
- cpu_abort(&s->cpu->parent_obj,
+ cpu_abort(CPU(s->cpu),
"Lockup: can't take terminal derived exception "
"(original exception priority %d)\n",
s->vectpending_prio);
@@ -643,7 +644,7 @@ static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
* Lockup condition due to a guest bug. We don't model
* Lockup, so report via cpu_abort() instead.
*/
- cpu_abort(&s->cpu->parent_obj,
+ cpu_abort(CPU(s->cpu),
"Lockup: can't escalate %d to HardFault "
"(current priority %d)\n", irq, running);
}
@@ -741,7 +742,7 @@ void armv7m_nvic_set_pending_lazyfp(NVICState *s, int irq, bool secure)
* We want to escalate to HardFault but the context the
* FP state belongs to prevents the exception pre-empting.
*/
- cpu_abort(&s->cpu->parent_obj,
+ cpu_abort(CPU(s->cpu),
"Lockup: can't escalate to HardFault during "
"lazy FP register stacking\n");
}
@@ -2454,8 +2455,10 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
/* This is UNPREDICTABLE; treat as RAZ/WI */
exit_ok:
- /* Ensure any changes made are reflected in the cached hflags. */
- arm_rebuild_hflags(&s->cpu->env);
+ if (tcg_enabled()) {
+ /* Ensure any changes made are reflected in the cached hflags. */
+ arm_rebuild_hflags(&s->cpu->env);
+ }
return MEMTX_OK;
}
@@ -2636,11 +2639,14 @@ static void armv7m_nvic_reset(DeviceState *dev)
}
}
- /*
- * We updated state that affects the CPU's MMUidx and thus its hflags;
- * and we can't guarantee that we run before the CPU reset function.
- */
- arm_rebuild_hflags(&s->cpu->env);
+ if (tcg_enabled()) {
+ /*
+ * We updated state that affects the CPU's MMUidx and thus its
+ * hflags; and we can't guarantee that we run before the CPU
+ * reset function.
+ */
+ arm_rebuild_hflags(&s->cpu->env);
+ }
}
static void nvic_systick_trigger(void *opaque, int n, int level)
diff --git a/hw/microblaze/petalogix_s3adsp1800_mmu.c b/hw/microblaze/petalogix_s3adsp1800_mmu.c
index 9d959d1ad8..505639c298 100644
--- a/hw/microblaze/petalogix_s3adsp1800_mmu.c
+++ b/hw/microblaze/petalogix_s3adsp1800_mmu.c
@@ -100,8 +100,11 @@ petalogix_s3adsp1800_init(MachineState *machine)
irq[i] = qdev_get_gpio_in(dev, i);
}
- xilinx_uartlite_create(UARTLITE_BASEADDR, irq[UARTLITE_IRQ],
- serial_hd(0));
+ dev = qdev_new(TYPE_XILINX_UARTLITE);
+ qdev_prop_set_chr(dev, "chardev", serial_hd(0));
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, UARTLITE_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[UARTLITE_IRQ]);
/* 2 timers at irq 2 @ 62 Mhz. */
dev = qdev_new("xlnx.xps-timer");
diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c
index f43f33fbd9..298564f1f5 100644
--- a/hw/pci-host/mv64361.c
+++ b/hw/pci-host/mv64361.c
@@ -11,7 +11,6 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "hw/hw.h"
#include "hw/sysbus.h"
#include "hw/pci/pci_device.h"
#include "hw/pci/pci_host.h"
diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c
index cdfb62ac2e..072ffe3c5e 100644
--- a/hw/pci-host/raven.c
+++ b/hw/pci-host/raven.c
@@ -60,7 +60,7 @@ DECLARE_INSTANCE_CHECKER(PREPPCIState, RAVEN_PCI_HOST_BRIDGE,
struct PRePPCIState {
PCIHostState parent_obj;
- qemu_or_irq *or_irq;
+ OrIRQState *or_irq;
qemu_irq pci_irqs[PCI_NUM_PINS];
PCIBus pci_bus;
AddressSpace pci_io_as;
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index a9563f4fb2..7cc375df05 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -10,7 +10,6 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
-#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "hw/sysbus.h"
#include "hw/pci/pci_host.h"
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index c7e0e50bd8..52bf8e67de 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -173,12 +173,55 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
exit(1);
}
+static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
+{
+ const char *filename = machine->initrd_filename;
+ uint64_t mem_size = machine->ram_size;
+ void *fdt = machine->fdt;
+ hwaddr start, end;
+ ssize_t size;
+
+ g_assert(filename != NULL);
+
+ /*
+ * We want to put the initrd far enough into RAM that when the
+ * kernel is uncompressed it will not clobber the initrd. However
+ * on boards without much RAM we must ensure that we still leave
+ * enough room for a decent sized initrd, and on boards with large
+ * amounts of RAM we must avoid the initrd being so far up in RAM
+ * that it is outside lowmem and inaccessible to the kernel.
+ * So for boards with less than 256MB of RAM we put the initrd
+ * halfway into RAM, and for boards with 256MB of RAM or more we put
+ * the initrd at 128MB.
+ */
+ start = kernel_entry + MIN(mem_size / 2, 128 * MiB);
+
+ size = load_ramdisk(filename, start, mem_size - start);
+ if (size == -1) {
+ size = load_image_targphys(filename, start, mem_size - start);
+ if (size == -1) {
+ error_report("could not load ramdisk '%s'", filename);
+ exit(1);
+ }
+ }
+
+ /* Some RISC-V machines (e.g. opentitan) don't have a fdt. */
+ if (fdt) {
+ end = start + size;
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", start);
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", end);
+ }
+}
+
target_ulong riscv_load_kernel(MachineState *machine,
+ RISCVHartArrayState *harts,
target_ulong kernel_start_addr,
+ bool load_initrd,
symbol_fn_t sym_cb)
{
const char *kernel_filename = machine->kernel_filename;
uint64_t kernel_load_base, kernel_entry;
+ void *fdt = machine->fdt;
g_assert(kernel_filename != NULL);
@@ -192,61 +235,43 @@ target_ulong riscv_load_kernel(MachineState *machine,
if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
NULL, &kernel_load_base, NULL, NULL, 0,
EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
- return kernel_load_base;
+ kernel_entry = kernel_load_base;
+ goto out;
}
if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
NULL, NULL, NULL) > 0) {
- return kernel_entry;
+ goto out;
}
if (load_image_targphys_as(kernel_filename, kernel_start_addr,
current_machine->ram_size, NULL) > 0) {
- return kernel_start_addr;
+ kernel_entry = kernel_start_addr;
+ goto out;
}
error_report("could not load kernel '%s'", kernel_filename);
exit(1);
-}
-
-void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
-{
- const char *filename = machine->initrd_filename;
- uint64_t mem_size = machine->ram_size;
- void *fdt = machine->fdt;
- hwaddr start, end;
- ssize_t size;
-
- g_assert(filename != NULL);
+out:
/*
- * We want to put the initrd far enough into RAM that when the
- * kernel is uncompressed it will not clobber the initrd. However
- * on boards without much RAM we must ensure that we still leave
- * enough room for a decent sized initrd, and on boards with large
- * amounts of RAM we must avoid the initrd being so far up in RAM
- * that it is outside lowmem and inaccessible to the kernel.
- * So for boards with less than 256MB of RAM we put the initrd
- * halfway into RAM, and for boards with 256MB of RAM or more we put
- * the initrd at 128MB.
+ * For 32 bit CPUs 'kernel_entry' can be sign-extended by
+ * load_elf_ram_sym().
*/
- start = kernel_entry + MIN(mem_size / 2, 128 * MiB);
+ if (riscv_is_32bit(harts)) {
+ kernel_entry = extract64(kernel_entry, 0, 32);
+ }
- size = load_ramdisk(filename, start, mem_size - start);
- if (size == -1) {
- size = load_image_targphys(filename, start, mem_size - start);
- if (size == -1) {
- error_report("could not load ramdisk '%s'", filename);
- exit(1);
- }
+ if (load_initrd && machine->initrd_filename) {
+ riscv_load_initrd(machine, kernel_entry);
}
- /* Some RISC-V machines (e.g. opentitan) don't have a fdt. */
- if (fdt) {
- end = start + size;
- qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", start);
- qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", end);
+ if (fdt && machine->kernel_cmdline && *machine->kernel_cmdline) {
+ qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ machine->kernel_cmdline);
}
+
+ return kernel_entry;
}
/*
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
index 2b91e49561..e81bbd12df 100644
--- a/hw/riscv/microchip_pfsoc.c
+++ b/hw/riscv/microchip_pfsoc.c
@@ -629,16 +629,8 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
-
- if (machine->initrd_filename) {
- riscv_load_initrd(machine, kernel_entry);
- }
-
- if (machine->kernel_cmdline && *machine->kernel_cmdline) {
- qemu_fdt_setprop_string(machine->fdt, "/chosen",
- "bootargs", machine->kernel_cmdline);
- }
+ kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
+ kernel_start_addr, true, NULL);
/* Compute the fdt load address in dram */
fdt_load_addr = riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base,
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index 353f030d80..b06944d382 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -101,7 +101,9 @@ static void opentitan_board_init(MachineState *machine)
}
if (machine->kernel_filename) {
- riscv_load_kernel(machine, memmap[IBEX_DEV_RAM].base, NULL);
+ riscv_load_kernel(machine, &s->soc.cpus,
+ memmap[IBEX_DEV_RAM].base,
+ false, NULL);
}
}
diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index 3e3f4b0088..04939b60c3 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -114,7 +114,9 @@ static void sifive_e_machine_init(MachineState *machine)
memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory);
if (machine->kernel_filename) {
- riscv_load_kernel(machine, memmap[SIFIVE_E_DEV_DTIM].base, NULL);
+ riscv_load_kernel(machine, &s->soc.cpus,
+ memmap[SIFIVE_E_DEV_DTIM].base,
+ false, NULL);
}
}
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index d3ab7a9cda..ad3bb35b34 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -598,16 +598,8 @@ static void sifive_u_machine_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
-
- if (machine->initrd_filename) {
- riscv_load_initrd(machine, kernel_entry);
- }
-
- if (machine->kernel_cmdline && *machine->kernel_cmdline) {
- qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
- machine->kernel_cmdline);
- }
+ kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
+ kernel_start_addr, true, NULL);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index cc3f6dac17..a584d5b3a2 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -305,17 +305,9 @@ static void spike_board_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
- htif_symbol_callback);
-
- if (machine->initrd_filename) {
- riscv_load_initrd(machine, kernel_entry);
- }
-
- if (machine->kernel_cmdline && *machine->kernel_cmdline) {
- qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
- machine->kernel_cmdline);
- }
+ kernel_entry = riscv_load_kernel(machine, &s->soc[0],
+ kernel_start_addr,
+ true, htif_symbol_callback);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index b81081c70b..86c4adc0c9 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -1277,16 +1277,8 @@ static void virt_machine_done(Notifier *notifier, void *data)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
-
- if (machine->initrd_filename) {
- riscv_load_initrd(machine, kernel_entry);
- }
-
- if (machine->kernel_cmdline && *machine->kernel_cmdline) {
- qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
- machine->kernel_cmdline);
- }
+ kernel_entry = riscv_load_kernel(machine, &s->soc[0],
+ kernel_start_addr, true, NULL);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/hw/s390x/pv.c b/hw/s390x/pv.c
index 8a1c71436b..49ea38236c 100644
--- a/hw/s390x/pv.c
+++ b/hw/s390x/pv.c
@@ -16,6 +16,7 @@
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
+#include "sysemu/cpus.h"
#include "qom/object_interfaces.h"
#include "exec/confidential-guest-support.h"
#include "hw/s390x/ipl.h"
@@ -108,6 +109,33 @@ void s390_pv_vm_disable(void)
s390_pv_cmd_exit(KVM_PV_DISABLE, NULL);
}
+static void *s390_pv_do_unprot_async_fn(void *p)
+{
+ s390_pv_cmd_exit(KVM_PV_ASYNC_CLEANUP_PERFORM, NULL);
+ return NULL;
+}
+
+bool s390_pv_vm_try_disable_async(void)
+{
+ /*
+ * t is only needed to create the thread; once qemu_thread_create
+ * returns, it can safely be discarded.
+ */
+ QemuThread t;
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) {
+ return false;
+ }
+ if (s390_pv_cmd(KVM_PV_ASYNC_CLEANUP_PREPARE, NULL) != 0) {
+ return false;
+ }
+
+ qemu_thread_create(&t, "async_cleanup", s390_pv_do_unprot_async_fn, NULL,
+ QEMU_THREAD_DETACHED);
+
+ return true;
+}
+
int s390_pv_set_sec_parms(uint64_t origin, uint64_t length)
{
struct kvm_s390_pv_sec_parm args = {
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index f22f61b8b6..503f212a31 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -41,6 +41,7 @@
#include "hw/qdev-properties.h"
#include "hw/s390x/tod.h"
#include "sysemu/sysemu.h"
+#include "sysemu/cpus.h"
#include "hw/s390x/pv.h"
#include "migration/blocker.h"
#include "qapi/visitor.h"
@@ -329,7 +330,9 @@ static inline void s390_do_cpu_ipl(CPUState *cs, run_on_cpu_data arg)
static void s390_machine_unprotect(S390CcwMachineState *ms)
{
- s390_pv_vm_disable();
+ if (!s390_pv_vm_try_disable_async()) {
+ s390_pv_vm_disable();
+ }
ms->pv = false;
migrate_del_blocker(pv_mig_blocker);
error_free_or_abort(&pv_mig_blocker);
diff --git a/hw/scsi/viosrp.h b/hw/scsi/viosrp.h
index e5f9768e8f..58c29aa925 100644
--- a/hw/scsi/viosrp.h
+++ b/hw/scsi/viosrp.h
@@ -16,8 +16,7 @@
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
-/* along with this program; if not, write to the Free Software */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/* along with this program. If not, see <https://www.gnu.org/licenses/>. */
/* */
/* */
/* This file contains structures and definitions for IBM RPA (RS/6000 */
diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c
index d60a18ac41..addee99b19 100644
--- a/hw/sensor/dps310.c
+++ b/hw/sensor/dps310.c
@@ -9,7 +9,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
-#include "hw/hw.h"
#include "hw/i2c/i2c.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h
index beb571d5e9..94043431e6 100644
--- a/hw/sh4/sh7750_regs.h
+++ b/hw/sh4/sh7750_regs.h
@@ -22,8 +22,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details. You should have received
* a copy of the GNU General Public License along with RTEMS; see
- * file COPYING. If not, write to the Free Software Foundation, 675
- * Mass Ave, Cambridge, MA 02139, USA.
+ * file COPYING. If not, see <https://www.gnu.org/licenses/>.
*
* As a special exception, including RTEMS header files in a file,
* instantiating RTEMS generics or templates, or linking other files
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index 0354737666..1e2fce83b0 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -76,8 +76,7 @@ struct VFIODeviceOps vfio_ccw_ops = {
static IOInstEnding vfio_ccw_handle_request(SubchDev *sch)
{
- S390CCWDevice *cdev = sch->driver_data;
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
struct ccw_io_region *region = vcdev->io_region;
int ret;
@@ -125,8 +124,7 @@ again:
static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
{
- S390CCWDevice *cdev = sch->driver_data;
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
SCHIB *schib = &sch->curr_status;
struct ccw_schib_region *region = vcdev->schib_region;
SCHIB *s;
@@ -170,8 +168,7 @@ static IOInstEnding vfio_ccw_handle_store(SubchDev *sch)
static int vfio_ccw_handle_clear(SubchDev *sch)
{
- S390CCWDevice *cdev = sch->driver_data;
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
struct ccw_cmd_region *region = vcdev->async_cmd_region;
int ret;
@@ -210,8 +207,7 @@ again:
static int vfio_ccw_handle_halt(SubchDev *sch)
{
- S390CCWDevice *cdev = sch->driver_data;
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(sch->driver_data);
struct ccw_cmd_region *region = vcdev->async_cmd_region;
int ret;
@@ -251,9 +247,7 @@ again:
static void vfio_ccw_reset(DeviceState *dev)
{
- CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
- S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(dev);
ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET);
}
@@ -315,8 +309,7 @@ static void vfio_ccw_io_notifier_handler(void *opaque)
{
VFIOCCWDevice *vcdev = opaque;
struct ccw_io_region *region = vcdev->io_region;
- S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev);
- CcwDevice *ccw_dev = CCW_DEVICE(cdev);
+ CcwDevice *ccw_dev = CCW_DEVICE(vcdev);
SubchDev *sch = ccw_dev->sch;
SCHIB *schib = &sch->curr_status;
SCSW s;
@@ -588,9 +581,10 @@ static void vfio_ccw_put_device(VFIOCCWDevice *vcdev)
static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
Error **errp)
{
- char *name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
- vcdev->cdev.hostid.ssid,
- vcdev->cdev.hostid.devid);
+ S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev);
+ char *name = g_strdup_printf("%x.%x.%04x", cdev->hostid.cssid,
+ cdev->hostid.ssid,
+ cdev->hostid.devid);
VFIODevice *vbasedev;
QLIST_FOREACH(vbasedev, &group->device_list, next) {
@@ -611,14 +605,14 @@ static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
*/
vcdev->vdev.ram_block_discard_allowed = true;
- if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) {
+ if (vfio_get_device(group, cdev->mdevid, &vcdev->vdev, errp)) {
goto out_err;
}
vcdev->vdev.ops = &vfio_ccw_ops;
vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW;
vcdev->vdev.name = name;
- vcdev->vdev.dev = &vcdev->cdev.parent_obj.parent_obj;
+ vcdev->vdev.dev = DEVICE(vcdev);
return;
@@ -656,9 +650,8 @@ static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp)
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
VFIOGroup *group;
- CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
- S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
Error *err = NULL;
@@ -728,9 +721,8 @@ out_err_propagate:
static void vfio_ccw_unrealize(DeviceState *dev)
{
- CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
- S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
- VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev);
+ S390CCWDevice *cdev = S390_CCW_DEVICE(dev);
+ VFIOCCWDevice *vcdev = VFIO_CCW(cdev);
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev);
VFIOGroup *group = vcdev->vdev.group;
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 21309cf567..d5a4f30717 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -135,6 +135,10 @@ typedef struct CPUTLBEntry {
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
+
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
+
+#if !defined(CONFIG_USER_ONLY)
/*
* The full TLB entry, which is not accessed by generated TCG code,
* so the layout is not as critical as that of CPUTLBEntry. This is
@@ -176,7 +180,9 @@ typedef struct CPUTLBEntryFull {
TARGET_PAGE_ENTRY_EXTRA
#endif
} CPUTLBEntryFull;
+#endif /* !CONFIG_USER_ONLY */
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/*
* Data elements that are per MMU mode, minus the bits accessed by
* the TCG fast path.
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
index 79e0c80568..095afb225d 100644
--- a/include/hw/arm/allwinner-a10.h
+++ b/include/hw/arm/allwinner-a10.h
@@ -1,9 +1,7 @@
#ifndef HW_ARM_ALLWINNER_A10_H
#define HW_ARM_ALLWINNER_A10_H
-#include "hw/char/serial.h"
#include "hw/arm/boot.h"
-#include "hw/pci/pci_device.h"
#include "hw/timer/allwinner-a10-pit.h"
#include "hw/intc/allwinner-a10-pic.h"
#include "hw/net/allwinner_emac.h"
diff --git a/include/hw/arm/armsse.h b/include/hw/arm/armsse.h
index 9648e7a419..cd0931d0a0 100644
--- a/include/hw/arm/armsse.h
+++ b/include/hw/arm/armsse.h
@@ -155,12 +155,12 @@ struct ARMSSE {
TZPPC apb_ppc[NUM_INTERNAL_PPCS];
TZMPC mpc[IOTS_NUM_MPC];
CMSDKAPBTimer timer[3];
- qemu_or_irq ppc_irq_orgate;
+ OrIRQState ppc_irq_orgate;
SplitIRQ sec_resp_splitter;
SplitIRQ ppc_irq_splitter[NUM_PPCS];
SplitIRQ mpc_irq_splitter[IOTS_NUM_EXP_MPC + IOTS_NUM_MPC];
- qemu_or_irq mpc_irq_orgate;
- qemu_or_irq nmi_orgate;
+ OrIRQState mpc_irq_orgate;
+ OrIRQState nmi_orgate;
SplitIRQ cpu_irq_splitter[NUM_SSE_IRQS];
diff --git a/include/hw/arm/bcm2835_peripherals.h b/include/hw/arm/bcm2835_peripherals.h
index c9d25d493e..d724a2fc28 100644
--- a/include/hw/arm/bcm2835_peripherals.h
+++ b/include/hw/arm/bcm2835_peripherals.h
@@ -56,7 +56,7 @@ struct BCM2835PeripheralState {
BCM2835AuxState aux;
BCM2835FBState fb;
BCM2835DMAState dma;
- qemu_or_irq orgated_dma_irq;
+ OrIRQState orgated_dma_irq;
BCM2835ICState ic;
BCM2835PropertyState property;
BCM2835RngState rng;
diff --git a/include/hw/arm/exynos4210.h b/include/hw/arm/exynos4210.h
index 97353f1c02..68db19f0cb 100644
--- a/include/hw/arm/exynos4210.h
+++ b/include/hw/arm/exynos4210.h
@@ -96,8 +96,8 @@ struct Exynos4210State {
MemoryRegion boot_secondary;
MemoryRegion bootreg_mem;
I2CBus *i2c_if[EXYNOS4210_I2C_NUMBER];
- qemu_or_irq pl330_irq_orgate[EXYNOS4210_NUM_DMA];
- qemu_or_irq cpu_irq_orgate[EXYNOS4210_NCPUS];
+ OrIRQState pl330_irq_orgate[EXYNOS4210_NUM_DMA];
+ OrIRQState cpu_irq_orgate[EXYNOS4210_NCPUS];
A9MPPrivState a9mpcore;
Exynos4210GicState ext_gic;
Exynos4210CombinerState int_combiner;
diff --git a/include/hw/arm/raspi_platform.h b/include/hw/arm/raspi_platform.h
index e0e6c8ce94..4a56dd4b89 100644
--- a/include/hw/arm/raspi_platform.h
+++ b/include/hw/arm/raspi_platform.h
@@ -18,8 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* Various undocumented addresses and names come from Herman Hermitage's VC4
* documentation:
diff --git a/include/hw/arm/stm32f205_soc.h b/include/hw/arm/stm32f205_soc.h
index 849d3ed889..5a4f776264 100644
--- a/include/hw/arm/stm32f205_soc.h
+++ b/include/hw/arm/stm32f205_soc.h
@@ -63,7 +63,7 @@ struct STM32F205State {
STM32F2XXADCState adc[STM_NUM_ADCS];
STM32F2XXSPIState spi[STM_NUM_SPIS];
- qemu_or_irq *adc_irqs;
+ OrIRQState *adc_irqs;
MemoryRegion sram;
MemoryRegion flash;
diff --git a/include/hw/arm/stm32f405_soc.h b/include/hw/arm/stm32f405_soc.h
index 249ab5434e..c968ce3ab2 100644
--- a/include/hw/arm/stm32f405_soc.h
+++ b/include/hw/arm/stm32f405_soc.h
@@ -63,7 +63,7 @@ struct STM32F405State {
STM32F4xxExtiState exti;
STM32F2XXUsartState usart[STM_NUM_USARTS];
STM32F2XXTimerState timer[STM_NUM_TIMERS];
- qemu_or_irq adc_irqs;
+ OrIRQState adc_irqs;
STM32F2XXADCState adc[STM_NUM_ADCS];
STM32F2XXSPIState spi[STM_NUM_SPIS];
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index cbe8a19c10..b6786e9832 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -85,7 +85,7 @@ struct Versal {
} rpu;
struct {
- qemu_or_irq irq_orgate;
+ OrIRQState irq_orgate;
XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM];
} xram;
@@ -103,7 +103,7 @@ struct Versal {
XlnxCSUDMA dma_src;
XlnxCSUDMA dma_dst;
MemoryRegion linear_mr;
- qemu_or_irq irq_orgate;
+ OrIRQState irq_orgate;
} ospi;
} iou;
@@ -113,7 +113,7 @@ struct Versal {
XlnxVersalEFuseCtrl efuse_ctrl;
XlnxVersalEFuseCache efuse_cache;
- qemu_or_irq apb_irq_orgate;
+ OrIRQState apb_irq_orgate;
} pmc;
struct {
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 20bdf894aa..687c75e3b0 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -130,7 +130,7 @@ struct XlnxZynqMPState {
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
XlnxCSUDMA qspi_dma;
- qemu_or_irq qspi_irq_orgate;
+ OrIRQState qspi_irq_orgate;
XlnxZynqMPAPUCtrl apu_ctrl;
XlnxZynqMPCRF crf;
CadenceTTCState ttc[XLNX_ZYNQMP_NUM_TTC];
diff --git a/include/hw/char/cmsdk-apb-uart.h b/include/hw/char/cmsdk-apb-uart.h
index 64b0a3d534..7de8f8d1b9 100644
--- a/include/hw/char/cmsdk-apb-uart.h
+++ b/include/hw/char/cmsdk-apb-uart.h
@@ -12,10 +12,8 @@
#ifndef CMSDK_APB_UART_H
#define CMSDK_APB_UART_H
-#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
-#include "qapi/error.h"
#include "qom/object.h"
#define TYPE_CMSDK_APB_UART "cmsdk-apb-uart"
@@ -45,36 +43,4 @@ struct CMSDKAPBUART {
uint8_t rxbuf;
};
-/**
- * cmsdk_apb_uart_create - convenience function to create TYPE_CMSDK_APB_UART
- * @addr: location in system memory to map registers
- * @chr: Chardev backend to connect UART to, or NULL if no backend
- * @pclk_frq: frequency in Hz of the PCLK clock (used for calculating baud rate)
- */
-static inline DeviceState *cmsdk_apb_uart_create(hwaddr addr,
- qemu_irq txint,
- qemu_irq rxint,
- qemu_irq txovrint,
- qemu_irq rxovrint,
- qemu_irq uartint,
- Chardev *chr,
- uint32_t pclk_frq)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new(TYPE_CMSDK_APB_UART);
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- qdev_prop_set_uint32(dev, "pclk-frq", pclk_frq);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, txint);
- sysbus_connect_irq(s, 1, rxint);
- sysbus_connect_irq(s, 2, txovrint);
- sysbus_connect_irq(s, 3, rxovrint);
- sysbus_connect_irq(s, 4, uartint);
- return dev;
-}
-
#endif
diff --git a/include/hw/char/pl011.h b/include/hw/char/pl011.h
index 926322e242..d853802132 100644
--- a/include/hw/char/pl011.h
+++ b/include/hw/char/pl011.h
@@ -15,10 +15,8 @@
#ifndef HW_PL011_H
#define HW_PL011_H
-#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "chardev/char-fe.h"
-#include "qapi/error.h"
#include "qom/object.h"
#define TYPE_PL011 "pl011"
@@ -57,38 +55,6 @@ struct PL011State {
const unsigned char *id;
};
-static inline DeviceState *pl011_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("pl011");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
-
-static inline DeviceState *pl011_luminary_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("pl011_luminary");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
+DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr);
#endif
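
pl011_create() survives as an out-of-line function, so existing callers keep working with only this header; pl011_luminary_create() is dropped entirely, so its former user presumably instantiates the "pl011_luminary" type through qdev directly. A hedged sketch of the remaining helper's use; the address and IRQ are placeholders, and serial_hd(0) stands in for whatever chardev backend the board picks:

    /* uart_irq: whatever qemu_irq the board routes UARTINTR to (placeholder). */
    DeviceState *uart = pl011_create(0x09000000 /* placeholder base */,
                                     uart_irq, serial_hd(0));
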
diff --git a/include/hw/char/xilinx_uartlite.h b/include/hw/char/xilinx_uartlite.h
index dd09c06801..36d4e8444d 100644
--- a/include/hw/char/xilinx_uartlite.h
+++ b/include/hw/char/xilinx_uartlite.h
@@ -15,25 +15,9 @@
#ifndef XILINX_UARTLITE_H
#define XILINX_UARTLITE_H
-#include "hw/qdev-properties.h"
-#include "hw/sysbus.h"
-#include "qapi/error.h"
+#include "qom/object.h"
-static inline DeviceState *xilinx_uartlite_create(hwaddr addr,
- qemu_irq irq,
- Chardev *chr)
-{
- DeviceState *dev;
- SysBusDevice *s;
-
- dev = qdev_new("xlnx.xps-uartlite");
- s = SYS_BUS_DEVICE(dev);
- qdev_prop_set_chr(dev, "chardev", chr);
- sysbus_realize_and_unref(s, &error_fatal);
- sysbus_mmio_map(s, 0, addr);
- sysbus_connect_irq(s, 0, irq);
-
- return dev;
-}
+#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite"
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE)
#endif
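
Only the QOM type name and declaration remain here, so instantiation moves to the generic qdev/sysbus calls that the deleted helper used to wrap (callers now need "hw/sysbus.h", "hw/qdev-properties.h" and "qapi/error.h" themselves). A minimal sketch along those lines, with a placeholder base address and IRQ:

    DeviceState *dev = qdev_new(TYPE_XILINX_UARTLITE);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    qdev_prop_set_chr(dev, "chardev", serial_hd(0));
    sysbus_realize_and_unref(sbd, &error_fatal);
    sysbus_mmio_map(sbd, 0, 0x84000000);          /* placeholder base address */
    sysbus_connect_irq(sbd, 0, uartlite_irq);     /* placeholder qemu_irq */
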
diff --git a/include/hw/or-irq.h b/include/hw/or-irq.h
index f2f0a27381..c0a42f3711 100644
--- a/include/hw/or-irq.h
+++ b/include/hw/or-irq.h
@@ -35,10 +35,7 @@
*/
#define MAX_OR_LINES 48
-typedef struct OrIRQState qemu_or_irq;
-
-DECLARE_INSTANCE_CHECKER(qemu_or_irq, OR_IRQ,
- TYPE_OR_IRQ)
+OBJECT_DECLARE_SIMPLE_TYPE(OrIRQState, OR_IRQ)
struct OrIRQState {
DeviceState parent_obj;
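
The qemu_or_irq typedef alias is gone; OBJECT_DECLARE_SIMPLE_TYPE supplies the usual OR_IRQ() cast and the OrIRQState name that the SoC structs above now embed directly. A brief sketch of the unchanged usage pattern; the field name and line count are placeholders, and "num-lines" is assumed to be the device's existing property:

    /* In the SoC state struct (previously: qemu_or_irq irq_orgate;) */
    OrIRQState irq_orgate;

    /* In the SoC init/realize code: */
    object_initialize_child(OBJECT(s), "irq-orgate", &s->irq_orgate, TYPE_OR_IRQ);
    qdev_prop_set_uint16(DEVICE(&s->irq_orgate), "num-lines", 2);
    qdev_realize(DEVICE(&s->irq_orgate), NULL, &error_fatal);
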
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index 511390f60e..a2e4ae9cb0 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -44,9 +44,10 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr,
symbol_fn_t sym_cb);
target_ulong riscv_load_kernel(MachineState *machine,
+ RISCVHartArrayState *harts,
target_ulong firmware_end_addr,
+ bool load_initrd,
symbol_fn_t sym_cb);
-void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry);
uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
MachineState *ms);
void riscv_load_fdt(hwaddr fdt_addr, void *fdt);
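
riscv_load_kernel() now takes the hart array and a load_initrd flag, and the separate riscv_load_initrd() export disappears, so initrd handling is folded into the kernel loader. A hedged sketch of the machine-side call after this change; the harts pointer is a placeholder for the board's RISCVHartArrayState:

    kernel_entry = riscv_load_kernel(machine, &s->soc.cpus,   /* placeholder harts */
                                     firmware_end_addr,
                                     true,                    /* also load the initrd */
                                     NULL);                   /* sym_cb */
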
diff --git a/include/hw/s390x/pv.h b/include/hw/s390x/pv.h
index 9360aa1091..966306a9db 100644
--- a/include/hw/s390x/pv.h
+++ b/include/hw/s390x/pv.h
@@ -41,6 +41,7 @@ static inline bool s390_is_pv(void)
int s390_pv_query_info(void);
int s390_pv_vm_enable(void);
void s390_pv_vm_disable(void);
+bool s390_pv_vm_try_disable_async(void);
int s390_pv_set_sec_parms(uint64_t origin, uint64_t length);
int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak);
void s390_pv_prep_reset(void);
@@ -60,6 +61,7 @@ static inline bool s390_is_pv(void) { return false; }
static inline int s390_pv_query_info(void) { return 0; }
static inline int s390_pv_vm_enable(void) { return 0; }
static inline void s390_pv_vm_disable(void) {}
+static inline bool s390_pv_vm_try_disable_async(void) { return false; }
static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; }
static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; }
static inline void s390_pv_prep_reset(void) {}
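
The new s390_pv_vm_try_disable_async() reports whether the asynchronous teardown could be started, and the non-PV stub simply returns false. A hedged sketch of how a caller might use it; the synchronous fallback is an assumption for illustration, not taken from this header:

    if (!s390_pv_vm_try_disable_async()) {
        /* Assumed fallback: tear the protected VM down synchronously. */
        s390_pv_vm_disable();
    }
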
diff --git a/include/hw/ssi/ibex_spi_host.h b/include/hw/ssi/ibex_spi_host.h
index 8089cc1c31..5bd5557b9a 100644
--- a/include/hw/ssi/ibex_spi_host.h
+++ b/include/hw/ssi/ibex_spi_host.h
@@ -28,7 +28,6 @@
#define IBEX_SPI_HOST_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
#include "hw/ssi/ssi.h"
#include "qemu/fifo8.h"
#include "qom/object.h"
diff --git a/include/hw/timer/cmsdk-apb-timer.h b/include/hw/timer/cmsdk-apb-timer.h
index c4c7eae849..2dd615d1be 100644
--- a/include/hw/timer/cmsdk-apb-timer.h
+++ b/include/hw/timer/cmsdk-apb-timer.h
@@ -12,7 +12,6 @@
#ifndef CMSDK_APB_TIMER_H
#define CMSDK_APB_TIMER_H
-#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "hw/clock.h"
diff --git a/include/hw/tricore/tricore_testdevice.h b/include/hw/tricore/tricore_testdevice.h
index 1e2b8942ac..8b4fe15f24 100644
--- a/include/hw/tricore/tricore_testdevice.h
+++ b/include/hw/tricore/tricore_testdevice.h
@@ -19,7 +19,6 @@
#define HW_TRICORE_TESTDEVICE_H
#include "hw/sysbus.h"
-#include "hw/hw.h"
#define TYPE_TRICORE_TESTDEVICE "tricore_testdevice"
#define TRICORE_TESTDEVICE(obj) \
diff --git a/include/qemu/uri.h b/include/qemu/uri.h
index db5218c39e..3ad211d676 100644
--- a/include/qemu/uri.h
+++ b/include/qemu/uri.h
@@ -41,8 +41,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library. If not, see <https://www.gnu.org/licenses/>.
*
* Authors:
* Richard W.M. Jones <rjones@redhat.com>
diff --git a/iothread.c b/iothread.c
index 3862a64471..b41c305bd9 100644
--- a/iothread.c
+++ b/iothread.c
@@ -25,10 +25,6 @@
#include "qemu/rcu.h"
#include "qemu/main-loop.h"
-typedef ObjectClass IOThreadClass;
-
-DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
- TYPE_IOTHREAD)
#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build
index 06c75e646b..158a3b410c 100644
--- a/pc-bios/keymaps/meson.build
+++ b/pc-bios/keymaps/meson.build
@@ -33,7 +33,7 @@ keymaps = {
'tr': '-l tr',
}
-if meson.is_cross_build() or 'CONFIG_XKBCOMMON' not in config_host
+if meson.is_cross_build() or not xkbcommon.found()
native_qemu_keymap = find_program('qemu-keymap', required: false, disabler: true)
else
native_qemu_keymap = qemu_keymap
diff --git a/qemu-keymap.c b/qemu-keymap.c
index 4095b654a6..229866e004 100644
--- a/qemu-keymap.c
+++ b/qemu-keymap.c
@@ -226,6 +226,8 @@ int main(int argc, char *argv[])
state = xkb_state_new(map);
xkb_keymap_key_for_each(map, walk_map, state);
+ xkb_state_unref(state);
+ state = NULL;
/* add quirks */
fprintf(outfile,
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 6e526d95bb..f29e4c4dc3 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -3264,6 +3264,7 @@ void qemu_init(int argc, char **argv)
vnc_parse(optarg);
break;
case QEMU_OPTION_no_acpi:
+ warn_report("-no-acpi is deprecated, use '-machine acpi=off' instead");
qdict_put_str(machine_opts_dict, "acpi", "off");
break;
case QEMU_OPTION_no_hpet:
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index b75f813b40..326a03153d 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -15,6 +15,7 @@
#include "arm-powerctl.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
+#include "sysemu/tcg.h"
#ifndef DEBUG_ARM_POWERCTL
#define DEBUG_ARM_POWERCTL 0
@@ -127,8 +128,10 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
target_cpu->env.regs[0] = info->context_id;
}
- /* CP15 update requires rebuilding hflags */
- arm_rebuild_hflags(&target_cpu->env);
+ if (tcg_enabled()) {
+ /* CP15 update requires rebuilding hflags */
+ arm_rebuild_hflags(&target_cpu->env);
+ }
/* Start the new CPU at the requested address */
cpu_set_pc(target_cpu_state, info->entry);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 876ab8f3bf..0b333a749f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -539,9 +539,12 @@ static void arm_cpu_reset_hold(Object *obj)
}
#endif
- hw_breakpoint_update_all(cpu);
- hw_watchpoint_update_all(cpu);
- arm_rebuild_hflags(env);
+ if (tcg_enabled()) {
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+
+ arm_rebuild_hflags(env);
+ }
}
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index 3c671c88c1..dfc8b2a1a5 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -12,8 +12,9 @@
#include "cpregs.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
+#include "sysemu/tcg.h"
-
+#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
{
@@ -536,6 +537,243 @@ void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome)
raise_exception_debug(env, EXCP_UDEF, syndrome);
}
+void hw_watchpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ vaddr len = 0;
+ vaddr wvr = env->cp15.dbgwvr[n];
+ uint64_t wcr = env->cp15.dbgwcr[n];
+ int mask;
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+
+ if (env->cpu_watchpoint[n]) {
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
+ env->cpu_watchpoint[n] = NULL;
+ }
+
+ if (!FIELD_EX64(wcr, DBGWCR, E)) {
+ /* E bit clear : watchpoint disabled */
+ return;
+ }
+
+ switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
+ case 0:
+ /* LSC 00 is reserved and must behave as if the wp is disabled */
+ return;
+ case 1:
+ flags |= BP_MEM_READ;
+ break;
+ case 2:
+ flags |= BP_MEM_WRITE;
+ break;
+ case 3:
+ flags |= BP_MEM_ACCESS;
+ break;
+ }
+
+ /*
+ * Attempts to use both MASK and BAS fields simultaneously are
+ * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
+ * thus generating a watchpoint for every byte in the masked region.
+ */
+ mask = FIELD_EX64(wcr, DBGWCR, MASK);
+ if (mask == 1 || mask == 2) {
+ /*
+ * Reserved values of MASK; we must act as if the mask value was
+ * some non-reserved value, or as if the watchpoint were disabled.
+ * We choose the latter.
+ */
+ return;
+ } else if (mask) {
+ /* Watchpoint covers an aligned area up to 2GB in size */
+ len = 1ULL << mask;
+ /*
+ * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
+ * whether the watchpoint fires when the unmasked bits match; we opt
+ * to generate the exceptions.
+ */
+ wvr &= ~(len - 1);
+ } else {
+ /* Watchpoint covers bytes defined by the byte address select bits */
+ int bas = FIELD_EX64(wcr, DBGWCR, BAS);
+ int basstart;
+
+ if (extract64(wvr, 2, 1)) {
+ /*
+ * Deprecated case of an address that is only word (4-byte) aligned.
+ * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
+ */
+ bas &= 0xf;
+ }
+
+ if (bas == 0) {
+ /* This must act as if the watchpoint is disabled */
+ return;
+ }
+
+ /*
+ * The BAS bits are supposed to be programmed to indicate a contiguous
+ * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
+ * we fire for each byte in the word/doubleword addressed by the WVR.
+ * We choose to ignore any non-zero bits after the first range of 1s.
+ */
+ basstart = ctz32(bas);
+ len = cto32(bas >> basstart);
+ wvr += basstart;
+ }
+
+ cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
+ &env->cpu_watchpoint[n]);
+}
+
+void hw_watchpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * Completely clear out existing QEMU watchpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
+ hw_watchpoint_update(cpu, i);
+ }
+}
+
+void hw_breakpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bvr = env->cp15.dbgbvr[n];
+ uint64_t bcr = env->cp15.dbgbcr[n];
+ vaddr addr;
+ int bt;
+ int flags = BP_CPU;
+
+ if (env->cpu_breakpoint[n]) {
+ cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
+ env->cpu_breakpoint[n] = NULL;
+ }
+
+ if (!extract64(bcr, 0, 1)) {
+ /* E bit clear : breakpoint disabled */
+ return;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ switch (bt) {
+ case 4: /* unlinked address mismatch (reserved if AArch64) */
+ case 5: /* linked address mismatch (reserved if AArch64) */
+ qemu_log_mask(LOG_UNIMP,
+ "arm: address mismatch breakpoint types not implemented\n");
+ return;
+ case 0: /* unlinked address match */
+ case 1: /* linked address match */
+ {
+ /*
+ * Bits [1:0] are RES0.
+ *
+ * It is IMPLEMENTATION DEFINED whether bits [63:49]
+ * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
+ * of the VA field ([48] or [52] for FEAT_LVA), or whether the
+ * value is read as written. It is CONSTRAINED UNPREDICTABLE
+ * whether the RESS bits are ignored when comparing an address.
+ * Therefore we are allowed to compare the entire register, which
+ * lets us avoid considering whether FEAT_LVA is actually enabled.
+ *
+ * The BAS field is used to allow setting breakpoints on 16-bit
+ * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
+ * a bp will fire if the addresses covered by the bp and the addresses
+ * covered by the insn overlap but the insn doesn't start at the
+ * start of the bp address range. We choose to require the insn and
+ * the bp to have the same address. The constraints on writing to
+ * BAS enforced in dbgbcr_write mean we have only four cases:
+ * 0b0000 => no breakpoint
+ * 0b0011 => breakpoint on addr
+ * 0b1100 => breakpoint on addr + 2
+ * 0b1111 => breakpoint on addr
+ * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
+ */
+ int bas = extract64(bcr, 5, 4);
+ addr = bvr & ~3ULL;
+ if (bas == 0) {
+ return;
+ }
+ if (bas == 0xc) {
+ addr += 2;
+ }
+ break;
+ }
+ case 2: /* unlinked context ID match */
+ case 8: /* unlinked VMID match (reserved if no EL2) */
+ case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
+ qemu_log_mask(LOG_UNIMP,
+ "arm: unlinked context breakpoint types not implemented\n");
+ return;
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ case 3: /* linked context ID match */
+ default:
+ /*
+ * We must generate no events for Linked context matches (unless
+ * they are linked to by some other bp/wp, which is handled in
+ * updates for the linking bp/wp). We choose to also generate no events
+ * for reserved values.
+ */
+ return;
+ }
+
+ cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
+}
+
+void hw_breakpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * Completely clear out existing QEMU breakpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
+ hw_breakpoint_update(cpu, i);
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * In BE32 system mode, target memory is stored byteswapped (on a
+ * little-endian host system), and by the time we reach here (via an
+ * opcode helper) the addresses of subword accesses have been adjusted
+ * to account for that, which means that watchpoints will not match.
+ * Undo the adjustment here.
+ */
+ if (arm_sctlr_b(env)) {
+ if (len == 1) {
+ addr ^= 3;
+ } else if (len == 2) {
+ addr ^= 2;
+ }
+ }
+
+ return addr;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+#endif /* CONFIG_TCG */
+
/*
* Check for traps to "powerdown debug" registers, which are controlled
* by MDCR.TDOSA
@@ -813,112 +1051,6 @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
.access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
};
-void hw_watchpoint_update(ARMCPU *cpu, int n)
-{
- CPUARMState *env = &cpu->env;
- vaddr len = 0;
- vaddr wvr = env->cp15.dbgwvr[n];
- uint64_t wcr = env->cp15.dbgwcr[n];
- int mask;
- int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
-
- if (env->cpu_watchpoint[n]) {
- cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
- env->cpu_watchpoint[n] = NULL;
- }
-
- if (!FIELD_EX64(wcr, DBGWCR, E)) {
- /* E bit clear : watchpoint disabled */
- return;
- }
-
- switch (FIELD_EX64(wcr, DBGWCR, LSC)) {
- case 0:
- /* LSC 00 is reserved and must behave as if the wp is disabled */
- return;
- case 1:
- flags |= BP_MEM_READ;
- break;
- case 2:
- flags |= BP_MEM_WRITE;
- break;
- case 3:
- flags |= BP_MEM_ACCESS;
- break;
- }
-
- /*
- * Attempts to use both MASK and BAS fields simultaneously are
- * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
- * thus generating a watchpoint for every byte in the masked region.
- */
- mask = FIELD_EX64(wcr, DBGWCR, MASK);
- if (mask == 1 || mask == 2) {
- /*
- * Reserved values of MASK; we must act as if the mask value was
- * some non-reserved value, or as if the watchpoint were disabled.
- * We choose the latter.
- */
- return;
- } else if (mask) {
- /* Watchpoint covers an aligned area up to 2GB in size */
- len = 1ULL << mask;
- /*
- * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
- * whether the watchpoint fires when the unmasked bits match; we opt
- * to generate the exceptions.
- */
- wvr &= ~(len - 1);
- } else {
- /* Watchpoint covers bytes defined by the byte address select bits */
- int bas = FIELD_EX64(wcr, DBGWCR, BAS);
- int basstart;
-
- if (extract64(wvr, 2, 1)) {
- /*
- * Deprecated case of an only 4-aligned address. BAS[7:4] are
- * ignored, and BAS[3:0] define which bytes to watch.
- */
- bas &= 0xf;
- }
-
- if (bas == 0) {
- /* This must act as if the watchpoint is disabled */
- return;
- }
-
- /*
- * The BAS bits are supposed to be programmed to indicate a contiguous
- * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
- * we fire for each byte in the word/doubleword addressed by the WVR.
- * We choose to ignore any non-zero bits after the first range of 1s.
- */
- basstart = ctz32(bas);
- len = cto32(bas >> basstart);
- wvr += basstart;
- }
-
- cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
- &env->cpu_watchpoint[n]);
-}
-
-void hw_watchpoint_update_all(ARMCPU *cpu)
-{
- int i;
- CPUARMState *env = &cpu->env;
-
- /*
- * Completely clear out existing QEMU watchpoints and our array, to
- * avoid possible stale entries following migration load.
- */
- cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
- memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
-
- for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
- hw_watchpoint_update(cpu, i);
- }
-}
-
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -939,7 +1071,9 @@ static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
value &= ~3ULL;
raw_write(env, ri, value);
- hw_watchpoint_update(cpu, i);
+ if (tcg_enabled()) {
+ hw_watchpoint_update(cpu, i);
+ }
}
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -949,109 +1083,8 @@ static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
int i = ri->crm;
raw_write(env, ri, value);
- hw_watchpoint_update(cpu, i);
-}
-
-void hw_breakpoint_update(ARMCPU *cpu, int n)
-{
- CPUARMState *env = &cpu->env;
- uint64_t bvr = env->cp15.dbgbvr[n];
- uint64_t bcr = env->cp15.dbgbcr[n];
- vaddr addr;
- int bt;
- int flags = BP_CPU;
-
- if (env->cpu_breakpoint[n]) {
- cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
- env->cpu_breakpoint[n] = NULL;
- }
-
- if (!extract64(bcr, 0, 1)) {
- /* E bit clear : watchpoint disabled */
- return;
- }
-
- bt = extract64(bcr, 20, 4);
-
- switch (bt) {
- case 4: /* unlinked address mismatch (reserved if AArch64) */
- case 5: /* linked address mismatch (reserved if AArch64) */
- qemu_log_mask(LOG_UNIMP,
- "arm: address mismatch breakpoint types not implemented\n");
- return;
- case 0: /* unlinked address match */
- case 1: /* linked address match */
- {
- /*
- * Bits [1:0] are RES0.
- *
- * It is IMPLEMENTATION DEFINED whether bits [63:49]
- * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
- * of the VA field ([48] or [52] for FEAT_LVA), or whether the
- * value is read as written. It is CONSTRAINED UNPREDICTABLE
- * whether the RESS bits are ignored when comparing an address.
- * Therefore we are allowed to compare the entire register, which
- * lets us avoid considering whether FEAT_LVA is actually enabled.
- *
- * The BAS field is used to allow setting breakpoints on 16-bit
- * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
- * a bp will fire if the addresses covered by the bp and the addresses
- * covered by the insn overlap but the insn doesn't start at the
- * start of the bp address range. We choose to require the insn and
- * the bp to have the same address. The constraints on writing to
- * BAS enforced in dbgbcr_write mean we have only four cases:
- * 0b0000 => no breakpoint
- * 0b0011 => breakpoint on addr
- * 0b1100 => breakpoint on addr + 2
- * 0b1111 => breakpoint on addr
- * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
- */
- int bas = extract64(bcr, 5, 4);
- addr = bvr & ~3ULL;
- if (bas == 0) {
- return;
- }
- if (bas == 0xc) {
- addr += 2;
- }
- break;
- }
- case 2: /* unlinked context ID match */
- case 8: /* unlinked VMID match (reserved if no EL2) */
- case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
- qemu_log_mask(LOG_UNIMP,
- "arm: unlinked context breakpoint types not implemented\n");
- return;
- case 9: /* linked VMID match (reserved if no EL2) */
- case 11: /* linked context ID and VMID match (reserved if no EL2) */
- case 3: /* linked context ID match */
- default:
- /*
- * We must generate no events for Linked context matches (unless
- * they are linked to by some other bp/wp, which is handled in
- * updates for the linking bp/wp). We choose to also generate no events
- * for reserved values.
- */
- return;
- }
-
- cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
-}
-
-void hw_breakpoint_update_all(ARMCPU *cpu)
-{
- int i;
- CPUARMState *env = &cpu->env;
-
- /*
- * Completely clear out existing QEMU breakpoints and our array, to
- * avoid possible stale entries following migration load.
- */
- cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
- memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
-
- for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
- hw_breakpoint_update(cpu, i);
+ if (tcg_enabled()) {
+ hw_watchpoint_update(cpu, i);
}
}
@@ -1062,7 +1095,9 @@ static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
int i = ri->crm;
raw_write(env, ri, value);
- hw_breakpoint_update(cpu, i);
+ if (tcg_enabled()) {
+ hw_breakpoint_update(cpu, i);
+ }
}
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1079,7 +1114,9 @@ static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
value = deposit64(value, 8, 1, extract64(value, 7, 1));
raw_write(env, ri, value);
- hw_breakpoint_update(cpu, i);
+ if (tcg_enabled()) {
+ hw_breakpoint_update(cpu, i);
+ }
}
void define_debug_regs(ARMCPU *cpu)
@@ -1202,30 +1239,3 @@ void define_debug_regs(ARMCPU *cpu)
g_free(dbgwcr_el1_name);
}
}
-
-#if !defined(CONFIG_USER_ONLY)
-
-vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- /*
- * In BE32 system mode, target memory is stored byteswapped (on a
- * little-endian host system), and by the time we reach here (via an
- * opcode helper) the addresses of subword accesses have been adjusted
- * to account for that, which means that watchpoints will not match.
- * Undo the adjustment here.
- */
- if (arm_sctlr_b(env)) {
- if (len == 1) {
- addr ^= 3;
- } else if (len == 2) {
- addr ^= 2;
- }
- }
-
- return addr;
-}
-
-#endif
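
Since the BAS decode in hw_watchpoint_update() above is easy to misread, here is a short worked example of the byte-address-select path, with values chosen purely for illustration:

    /*
     * Illustration: DBGWVR = 0x1000, DBGWCR.BAS = 0b1100, MASK = 0.
     *   basstart = ctz32(0xc)      = 2
     *   len      = cto32(0xc >> 2) = cto32(0x3) = 2
     *   wvr     += basstart        -> a 2-byte watchpoint at 0x1002
     */
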
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 07d4100365..14af7ba095 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -5173,7 +5173,7 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(CPU(cpu));
- if (ri->type & ARM_CP_SUPPRESS_TB_END) {
+ if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
/*
* Normally we would always end the TB on an SCTLR write; see the
* comment in ARMCPRegInfo sctlr initialization below for why Xscale
@@ -6669,32 +6669,6 @@ int sme_exception_el(CPUARMState *env, int el)
return 0;
}
-/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
-static bool sme_fa64(CPUARMState *env, int el)
-{
- if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
- return false;
- }
-
- if (el <= 1 && !el_is_in_host(env, el)) {
- if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
- return false;
- }
- }
- if (el <= 2 && arm_is_el2_enabled(env)) {
- if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
- return false;
- }
- }
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
- return false;
- }
- }
-
- return true;
-}
-
/*
* Given that SVE is enabled, return the vector length for EL.
*/
@@ -6841,7 +6815,9 @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
memset(env->zarray, 0, sizeof(env->zarray));
}
- arm_rebuild_hflags(env);
+ if (tcg_enabled()) {
+ arm_rebuild_hflags(env);
+ }
}
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -9886,7 +9862,7 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
}
mask &= ~CACHED_CPSR_BITS;
env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
- if (rebuild_hflags) {
+ if (tcg_enabled() && rebuild_hflags) {
arm_rebuild_hflags(env);
}
}
@@ -10445,7 +10421,10 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
env->regs[14] = env->regs[15] + offset;
}
env->regs[15] = newpc;
- arm_rebuild_hflags(env);
+
+ if (tcg_enabled()) {
+ arm_rebuild_hflags(env);
+ }
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
@@ -11001,7 +10980,10 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
pstate_write(env, PSTATE_DAIF | new_mode);
env->aarch64 = true;
aarch64_restore_sp(env, new_el);
- helper_rebuild_hflags_a64(env, new_el);
+
+ if (tcg_enabled()) {
+ helper_rebuild_hflags_a64(env, new_el);
+ }
env->pc = addr;
@@ -11142,7 +11124,7 @@ int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
}
}
-static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
+int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
if (regime_has_2_ranges(mmu_idx)) {
return extract64(tcr, 57, 2);
@@ -11853,371 +11835,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
-static inline bool fgt_svc(CPUARMState *env, int el)
-{
- /*
- * Assuming fine-grained-traps are active, return true if we
- * should be trapping on SVC instructions. Only AArch64 can
- * trap on an SVC at EL1, but we don't need to special-case this
- * because if this is AArch32 EL1 then arm_fgt_active() is false.
- * We also know el is 0 or 1.
- */
- return el == 0 ?
- FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
- FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
-}
-
-static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx,
- CPUARMTBFlags flags)
-{
- DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
- DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
-
- if (arm_singlestep_active(env)) {
- DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
- }
-
- return flags;
-}
-
-static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx,
- CPUARMTBFlags flags)
-{
- bool sctlr_b = arm_sctlr_b(env);
-
- if (sctlr_b) {
- DP_TBFLAG_A32(flags, SCTLR__B, 1);
- }
- if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
- DP_TBFLAG_ANY(flags, BE_DATA, 1);
- }
- DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
-
- return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
-}
-
-static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx)
-{
- CPUARMTBFlags flags = {};
- uint32_t ccr = env->v7m.ccr[env->v7m.secure];
-
- /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
- if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
- DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
- }
-
- if (arm_v7m_is_handler_mode(env)) {
- DP_TBFLAG_M32(flags, HANDLER, 1);
- }
-
- /*
- * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
- * is suppressing them because the requested execution priority
- * is less than 0.
- */
- if (arm_feature(env, ARM_FEATURE_V8) &&
- !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
- (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
- DP_TBFLAG_M32(flags, STACKCHECK, 1);
- }
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
- DP_TBFLAG_M32(flags, SECURE, 1);
- }
-
- return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
-}
-
-static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
- ARMMMUIdx mmu_idx)
-{
- CPUARMTBFlags flags = {};
- int el = arm_current_el(env);
-
- if (arm_sctlr(env, el) & SCTLR_A) {
- DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
- }
-
- if (arm_el_is_aa64(env, 1)) {
- DP_TBFLAG_A32(flags, VFPEN, 1);
- }
-
- if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
- (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
- DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
- }
-
- if (arm_fgt_active(env, el)) {
- DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
- if (fgt_svc(env, el)) {
- DP_TBFLAG_ANY(flags, FGT_SVC, 1);
- }
- }
-
- if (env->uncached_cpsr & CPSR_IL) {
- DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
- }
-
- /*
- * The SME exception we are testing for is raised via
- * AArch64.CheckFPAdvSIMDEnabled(), as called from
- * AArch32.CheckAdvSIMDOrFPEnabled().
- */
- if (el == 0
- && FIELD_EX64(env->svcr, SVCR, SM)
- && (!arm_is_el2_enabled(env)
- || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
- && arm_el_is_aa64(env, 1)
- && !sme_fa64(env, el)) {
- DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
- }
-
- return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
-}
-
-static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
- ARMMMUIdx mmu_idx)
-{
- CPUARMTBFlags flags = {};
- ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
- uint64_t tcr = regime_tcr(env, mmu_idx);
- uint64_t sctlr;
- int tbii, tbid;
-
- DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
-
- /* Get control bits for tagged addresses. */
- tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
- tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
-
- DP_TBFLAG_A64(flags, TBII, tbii);
- DP_TBFLAG_A64(flags, TBID, tbid);
-
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
- int sve_el = sve_exception_el(env, el);
-
- /*
- * If either FP or SVE are disabled, translator does not need len.
- * If SVE EL > FP EL, FP exception has precedence, and translator
- * does not need SVE EL. Save potential re-translations by forcing
- * the unneeded data to zero.
- */
- if (fp_el != 0) {
- if (sve_el > fp_el) {
- sve_el = 0;
- }
- } else if (sve_el == 0) {
- DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
- }
- DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
- }
- if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
- int sme_el = sme_exception_el(env, el);
- bool sm = FIELD_EX64(env->svcr, SVCR, SM);
-
- DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
- if (sme_el == 0) {
- /* Similarly, do not compute SVL if SME is disabled. */
- int svl = sve_vqm1_for_el_sm(env, el, true);
- DP_TBFLAG_A64(flags, SVL, svl);
- if (sm) {
- /* If SVE is disabled, we will not have set VL above. */
- DP_TBFLAG_A64(flags, VL, svl);
- }
- }
- if (sm) {
- DP_TBFLAG_A64(flags, PSTATE_SM, 1);
- DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
- }
- DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
- }
-
- sctlr = regime_sctlr(env, stage1);
-
- if (sctlr & SCTLR_A) {
- DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
- }
-
- if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
- DP_TBFLAG_ANY(flags, BE_DATA, 1);
- }
-
- if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
- /*
- * In order to save space in flags, we record only whether
- * pauth is "inactive", meaning all insns are implemented as
- * a nop, or "active" when some action must be performed.
- * The decision of which action to take is left to a helper.
- */
- if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
- DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
- }
- }
-
- if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
- /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
- if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
- DP_TBFLAG_A64(flags, BT, 1);
- }
- }
-
- /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
- if (!(env->pstate & PSTATE_UAO)) {
- switch (mmu_idx) {
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- /* TODO: ARMv8.3-NV */
- DP_TBFLAG_A64(flags, UNPRIV, 1);
- break;
- case ARMMMUIdx_E20_2:
- case ARMMMUIdx_E20_2_PAN:
- /*
- * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
- * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
- */
- if (env->cp15.hcr_el2 & HCR_TGE) {
- DP_TBFLAG_A64(flags, UNPRIV, 1);
- }
- break;
- default:
- break;
- }
- }
-
- if (env->pstate & PSTATE_IL) {
- DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
- }
-
- if (arm_fgt_active(env, el)) {
- DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
- if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
- DP_TBFLAG_A64(flags, FGT_ERET, 1);
- }
- if (fgt_svc(env, el)) {
- DP_TBFLAG_ANY(flags, FGT_SVC, 1);
- }
- }
-
- if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
- /*
- * Set MTE_ACTIVE if any access may be Checked, and leave clear
- * if all accesses must be Unchecked:
- * 1) If no TBI, then there are no tags in the address to check,
- * 2) If Tag Check Override, then all accesses are Unchecked,
- * 3) If Tag Check Fail == 0, then Checked access have no effect,
- * 4) If no Allocation Tag Access, then all accesses are Unchecked.
- */
- if (allocation_tag_access_enabled(env, el, sctlr)) {
- DP_TBFLAG_A64(flags, ATA, 1);
- if (tbid
- && !(env->pstate & PSTATE_TCO)
- && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
- DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
- }
- }
- /* And again for unprivileged accesses, if required. */
- if (EX_TBFLAG_A64(flags, UNPRIV)
- && tbid
- && !(env->pstate & PSTATE_TCO)
- && (sctlr & SCTLR_TCF0)
- && allocation_tag_access_enabled(env, 0, sctlr)) {
- DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
- }
- /* Cache TCMA as well as TBI. */
- DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
- }
-
- return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
-}
-
-static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
-{
- int el = arm_current_el(env);
- int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
-
- if (is_a64(env)) {
- return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
- } else if (arm_feature(env, ARM_FEATURE_M)) {
- return rebuild_hflags_m32(env, fp_el, mmu_idx);
- } else {
- return rebuild_hflags_a32(env, fp_el, mmu_idx);
- }
-}
-
-void arm_rebuild_hflags(CPUARMState *env)
-{
- env->hflags = rebuild_hflags_internal(env);
-}
-
-/*
- * If we have triggered a EL state change we can't rely on the
- * translator having passed it to us, we need to recompute.
- */
-void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
-{
- int el = arm_current_el(env);
- int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
-
- env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
-}
-
-void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
-{
- int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
-
- env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
-}
-
-/*
- * If we have triggered a EL state change we can't rely on the
- * translator having passed it to us, we need to recompute.
- */
-void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
-{
- int el = arm_current_el(env);
- int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
- env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
-}
-
-void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
-{
- int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
-
- env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
-}
-
-void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
-{
- int fp_el = fp_exception_el(env, el);
- ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
-
- env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
-}
-
-static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
-{
-#ifdef CONFIG_DEBUG_TCG
- CPUARMTBFlags c = env->hflags;
- CPUARMTBFlags r = rebuild_hflags_internal(env);
-
- if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
- fprintf(stderr, "TCG hflags mismatch "
- "(current:(0x%08x,0x" TARGET_FMT_lx ")"
- " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
- c.flags, c.flags2, r.flags, r.flags2);
- abort();
- }
-#endif
-}
-
static bool mve_no_pred(CPUARMState *env)
{
/*
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 759b70c646..680c574717 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -600,9 +600,6 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
-/* Return true if the translation regime is using LPAE format page tables */
-bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
-
/*
* Return true if the stage 1 translation regime is using LPAE
* format page tables
@@ -767,6 +764,24 @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ int el = regime_el(env, mmu_idx);
+ if (el == 2 || arm_el_is_aa64(env, el)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)
+ && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
+ return true;
+ }
+ return false;
+}
+
/**
* arm_num_brps: Return number of implemented breakpoints.
* Note that the ID register BRPS field is "number of bps - 1",
@@ -1073,6 +1088,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
@@ -1383,4 +1399,5 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
+void assert_hflags_rebuild_correctly(CPUARMState *env);
#endif
diff --git a/target/arm/machine.c b/target/arm/machine.c
index b4c3850570..fc4a4a4064 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -2,6 +2,7 @@
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
#include "kvm_arm.h"
#include "internals.h"
#include "migration/cpu.h"
@@ -848,8 +849,10 @@ static int cpu_post_load(void *opaque, int version_id)
return -1;
}
- hw_breakpoint_update_all(cpu);
- hw_watchpoint_update_all(cpu);
+ if (tcg_enabled()) {
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+ }
/*
* TCG gen_update_fp_context() relies on the invariant that
@@ -868,7 +871,10 @@ static int cpu_post_load(void *opaque, int version_id)
if (!kvm_enabled()) {
pmu_op_finish(&cpu->env);
}
- arm_rebuild_hflags(&cpu->env);
+
+ if (tcg_enabled()) {
+ arm_rebuild_hflags(&cpu->env);
+ }
return 0;
}
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 87e911b27f..a5191b57e1 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -1,40 +1,9 @@
-gen = [
- decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
- decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
- decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
- decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
- decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
- decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
- decodetree.process('vfp.decode', extra_args: '--decode=disas_vfp'),
- decodetree.process('vfp-uncond.decode', extra_args: '--decode=disas_vfp_uncond'),
- decodetree.process('m-nocp.decode', extra_args: '--decode=disas_m_nocp'),
- decodetree.process('mve.decode', extra_args: '--decode=disas_mve'),
- decodetree.process('a32.decode', extra_args: '--static-decode=disas_a32'),
- decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'),
- decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'),
- decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']),
-]
-
arm_ss = ss.source_set()
-arm_ss.add(gen)
arm_ss.add(files(
'cpu.c',
- 'crypto_helper.c',
'debug_helper.c',
'gdbstub.c',
'helper.c',
- 'iwmmxt_helper.c',
- 'm_helper.c',
- 'mve_helper.c',
- 'neon_helper.c',
- 'op_helper.c',
- 'tlb_helper.c',
- 'translate.c',
- 'translate-m-nocp.c',
- 'translate-mve.c',
- 'translate-neon.c',
- 'translate-vfp.c',
- 'vec_helper.c',
'vfp_helper.c',
'cpu_tcg.c',
))
@@ -45,14 +14,6 @@ arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: fil
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
'gdbstub64.c',
- 'helper-a64.c',
- 'mte_helper.c',
- 'pauth_helper.c',
- 'sve_helper.c',
- 'sme_helper.c',
- 'translate-a64.c',
- 'translate-sve.c',
- 'translate-sme.c',
))
arm_softmmu_ss = ss.source_set()
@@ -61,11 +22,16 @@ arm_softmmu_ss.add(files(
'arm-powerctl.c',
'machine.c',
'monitor.c',
- 'psci.c',
'ptw.c',
))
subdir('hvf')
+if 'CONFIG_TCG' in config_all
+ subdir('tcg')
+else
+ arm_ss.add(files('tcg-stubs.c'))
+endif
+
target_arch += {'arm': arm_ss}
target_softmmu_arch += {'arm': arm_softmmu_ss}
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 2b125fff44..be0cc6bc15 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -254,6 +254,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
ptw->out_host = NULL;
ptw->out_rw = false;
} else {
+#ifdef CONFIG_TCG
CPUTLBEntryFull *full;
int flags;
@@ -270,6 +271,9 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
ptw->out_rw = full->prot & PAGE_WRITE;
pte_attrs = full->pte_attrs;
pte_secure = full->attrs.secure;
+#else
+ g_assert_not_reached();
+#endif
}
if (regime_is_stage2(s2_mmu_idx)) {
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
new file mode 100644
index 0000000000..152b172e24
--- /dev/null
+++ b/target/arm/tcg-stubs.c
@@ -0,0 +1,27 @@
+/*
+ * QEMU ARM stubs for some TCG helper functions
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+ g_assert_not_reached();
+}
+
+void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
+ uint32_t target_el, uintptr_t ra)
+{
+ g_assert_not_reached();
+}
+/* Temporary stub while cpu_get_tb_cpu_state() is still in common code */
+void assert_hflags_rebuild_correctly(CPUARMState *env)
+{
+}
diff --git a/target/arm/a32-uncond.decode b/target/arm/tcg/a32-uncond.decode
index 2339de2e94..2339de2e94 100644
--- a/target/arm/a32-uncond.decode
+++ b/target/arm/tcg/a32-uncond.decode
diff --git a/target/arm/a32.decode b/target/arm/tcg/a32.decode
index f2ca480949..f2ca480949 100644
--- a/target/arm/a32.decode
+++ b/target/arm/tcg/a32.decode
diff --git a/target/arm/crypto_helper.c b/target/arm/tcg/crypto_helper.c
index d28690321f..d28690321f 100644
--- a/target/arm/crypto_helper.c
+++ b/target/arm/tcg/crypto_helper.c
diff --git a/target/arm/helper-a64.c b/target/arm/tcg/helper-a64.c
index 0972a4bdd0..0972a4bdd0 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
new file mode 100644
index 0000000000..b2ccd77cff
--- /dev/null
+++ b/target/arm/tcg/hflags.c
@@ -0,0 +1,403 @@
+/*
+ * ARM hflags
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/helper-proto.h"
+#include "cpregs.h"
+
+static inline bool fgt_svc(CPUARMState *env, int el)
+{
+ /*
+ * Assuming fine-grained-traps are active, return true if we
+ * should be trapping on SVC instructions. Only AArch64 can
+ * trap on an SVC at EL1, but we don't need to special-case this
+ * because if this is AArch32 EL1 then arm_fgt_active() is false.
+ * We also know el is 0 or 1.
+ */
+ return el == 0 ?
+ FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL0) :
+ FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, SVC_EL1);
+}
+
+static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx,
+ CPUARMTBFlags flags)
+{
+ DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
+ DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
+
+ if (arm_singlestep_active(env)) {
+ DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
+ }
+
+ return flags;
+}
+
+static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx,
+ CPUARMTBFlags flags)
+{
+ bool sctlr_b = arm_sctlr_b(env);
+
+ if (sctlr_b) {
+ DP_TBFLAG_A32(flags, SCTLR__B, 1);
+ }
+ if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
+ }
+ DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
+
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ CPUARMTBFlags flags = {};
+ uint32_t ccr = env->v7m.ccr[env->v7m.secure];
+
+ /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
+ if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+ }
+
+ if (arm_v7m_is_handler_mode(env)) {
+ DP_TBFLAG_M32(flags, HANDLER, 1);
+ }
+
+ /*
+ * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
+ * is suppressing them because the requested execution priority
+ * is less than 0.
+ */
+ if (arm_feature(env, ARM_FEATURE_V8) &&
+ !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
+ (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
+ DP_TBFLAG_M32(flags, STACKCHECK, 1);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) && env->v7m.secure) {
+ DP_TBFLAG_M32(flags, SECURE, 1);
+ }
+
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
+/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
+static bool sme_fa64(CPUARMState *env, int el)
+{
+ if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
+ return false;
+ }
+
+ if (el <= 1 && !el_is_in_host(env, el)) {
+ if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
+ return false;
+ }
+ }
+ if (el <= 2 && arm_is_el2_enabled(env)) {
+ if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
+ return false;
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ CPUARMTBFlags flags = {};
+ int el = arm_current_el(env);
+
+ if (arm_sctlr(env, el) & SCTLR_A) {
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+ }
+
+ if (arm_el_is_aa64(env, 1)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+
+ if (el < 2 && env->cp15.hstr_el2 && arm_is_el2_enabled(env) &&
+ (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
+ }
+
+ if (arm_fgt_active(env, el)) {
+ DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
+ if (fgt_svc(env, el)) {
+ DP_TBFLAG_ANY(flags, FGT_SVC, 1);
+ }
+ }
+
+ if (env->uncached_cpsr & CPSR_IL) {
+ DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+ }
+
+ /*
+ * The SME exception we are testing for is raised via
+ * AArch64.CheckFPAdvSIMDEnabled(), as called from
+ * AArch32.CheckAdvSIMDOrFPEnabled().
+ */
+ if (el == 0
+ && FIELD_EX64(env->svcr, SVCR, SM)
+ && (!arm_is_el2_enabled(env)
+ || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
+ && arm_el_is_aa64(env, 1)
+ && !sme_fa64(env, el)) {
+ DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
+ }
+
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ CPUARMTBFlags flags = {};
+ ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
+ uint64_t tcr = regime_tcr(env, mmu_idx);
+ uint64_t sctlr;
+ int tbii, tbid;
+
+ DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
+
+ /* Get control bits for tagged addresses. */
+ tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
+ tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
+
+ DP_TBFLAG_A64(flags, TBII, tbii);
+ DP_TBFLAG_A64(flags, TBID, tbid);
+
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+ int sve_el = sve_exception_el(env, el);
+
+ /*
+ * If either FP or SVE are disabled, translator does not need len.
+ * If SVE EL > FP EL, FP exception has precedence, and translator
+ * does not need SVE EL. Save potential re-translations by forcing
+ * the unneeded data to zero.
+ */
+ if (fp_el != 0) {
+ if (sve_el > fp_el) {
+ sve_el = 0;
+ }
+ } else if (sve_el == 0) {
+ DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
+ }
+ DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
+ }
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ int sme_el = sme_exception_el(env, el);
+ bool sm = FIELD_EX64(env->svcr, SVCR, SM);
+
+ DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
+ if (sme_el == 0) {
+ /* Similarly, do not compute SVL if SME is disabled. */
+ int svl = sve_vqm1_for_el_sm(env, el, true);
+ DP_TBFLAG_A64(flags, SVL, svl);
+ if (sm) {
+ /* If SVE is disabled, we will not have set VL above. */
+ DP_TBFLAG_A64(flags, VL, svl);
+ }
+ }
+ if (sm) {
+ DP_TBFLAG_A64(flags, PSTATE_SM, 1);
+ DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
+ }
+ DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
+ }
+
+ sctlr = regime_sctlr(env, stage1);
+
+ if (sctlr & SCTLR_A) {
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+ }
+
+ if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
+ }
+
+ if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
+ /*
+ * In order to save space in flags, we record only whether
+ * pauth is "inactive", meaning all insns are implemented as
+ * a nop, or "active" when some action must be performed.
+ * The decision of which action to take is left to a helper.
+ */
+ if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
+ DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
+ }
+ }
+
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
+ if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
+ DP_TBFLAG_A64(flags, BT, 1);
+ }
+ }
+
+ /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
+ if (!(env->pstate & PSTATE_UAO)) {
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ /* TODO: ARMv8.3-NV */
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
+ break;
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ /*
+ * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
+ * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
+ */
+ if (env->cp15.hcr_el2 & HCR_TGE) {
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (env->pstate & PSTATE_IL) {
+ DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+ }
+
+ if (arm_fgt_active(env, el)) {
+ DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1);
+ if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) {
+ DP_TBFLAG_A64(flags, FGT_ERET, 1);
+ }
+ if (fgt_svc(env, el)) {
+ DP_TBFLAG_ANY(flags, FGT_SVC, 1);
+ }
+ }
+
+ if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
+ /*
+ * Set MTE_ACTIVE if any access may be Checked, and leave clear
+ * if all accesses must be Unchecked:
+ * 1) If no TBI, then there are no tags in the address to check,
+ * 2) If Tag Check Override, then all accesses are Unchecked,
+ * 3) If Tag Check Fail == 0, then Checked access have no effect,
+ * 4) If no Allocation Tag Access, then all accesses are Unchecked.
+ */
+ if (allocation_tag_access_enabled(env, el, sctlr)) {
+ DP_TBFLAG_A64(flags, ATA, 1);
+ if (tbid
+ && !(env->pstate & PSTATE_TCO)
+ && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
+ DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
+ }
+ }
+ /* And again for unprivileged accesses, if required. */
+ if (EX_TBFLAG_A64(flags, UNPRIV)
+ && tbid
+ && !(env->pstate & PSTATE_TCO)
+ && (sctlr & SCTLR_TCF0)
+ && allocation_tag_access_enabled(env, 0, sctlr)) {
+ DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
+ }
+ /* Cache TCMA as well as TBI. */
+ DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
+ }
+
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ if (is_a64(env)) {
+ return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
+ } else if (arm_feature(env, ARM_FEATURE_M)) {
+ return rebuild_hflags_m32(env, fp_el, mmu_idx);
+ } else {
+ return rebuild_hflags_a32(env, fp_el, mmu_idx);
+ }
+}
+
+void arm_rebuild_hflags(CPUARMState *env)
+{
+ env->hflags = rebuild_hflags_internal(env);
+}
+
+/*
+ * If we have triggered an EL state change we can't rely on the
+ * translator having passed it to us; we need to recompute.
+ */
+void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
+}
+
+void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
+{
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
+}
+
+/*
+ * If we have triggered an EL state change we can't rely on the
+ * translator having passed it to us; we need to recompute.
+ */
+void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+ env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+}
+
+void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
+{
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+}
+
+void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
+{
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
+}
+
+void assert_hflags_rebuild_correctly(CPUARMState *env)
+{
+#ifdef CONFIG_DEBUG_TCG
+ CPUARMTBFlags c = env->hflags;
+ CPUARMTBFlags r = rebuild_hflags_internal(env);
+
+ if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
+ fprintf(stderr, "TCG hflags mismatch "
+ "(current:(0x%08x,0x" TARGET_FMT_lx ")"
+ " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+ c.flags, c.flags2, r.flags, r.flags2);
+ abort();
+ }
+#endif
+}
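
Because the hflags rebuild code now lives under tcg/ and is only built with CONFIG_TCG, every caller outside this directory is guarded, as the hunks in helper.c, machine.c and arm-powerctl.c above show. The resulting idiom, sketched in isolation (the comment describes the usual QEMU pattern, where the guard is constant-folded away when TCG is compiled out):

    /* After any change to state that feeds the cached TB flags: */
    if (tcg_enabled()) {
        arm_rebuild_hflags(env);   /* folded away when TCG is compiled out */
    }
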
diff --git a/target/arm/iwmmxt_helper.c b/target/arm/tcg/iwmmxt_helper.c
index 610b1b2103..610b1b2103 100644
--- a/target/arm/iwmmxt_helper.c
+++ b/target/arm/tcg/iwmmxt_helper.c
diff --git a/target/arm/m-nocp.decode b/target/arm/tcg/m-nocp.decode
index b65c801c97..b65c801c97 100644
--- a/target/arm/m-nocp.decode
+++ b/target/arm/tcg/m-nocp.decode
diff --git a/target/arm/m_helper.c b/target/arm/tcg/m_helper.c
index f94e87e728..f94e87e728 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/tcg/m_helper.c
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
new file mode 100644
index 0000000000..d27e76af6c
--- /dev/null
+++ b/target/arm/tcg/meson.build
@@ -0,0 +1,50 @@
+gen = [
+ decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+ decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
+ decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
+ decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
+ decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
+ decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
+ decodetree.process('vfp.decode', extra_args: '--decode=disas_vfp'),
+ decodetree.process('vfp-uncond.decode', extra_args: '--decode=disas_vfp_uncond'),
+ decodetree.process('m-nocp.decode', extra_args: '--decode=disas_m_nocp'),
+ decodetree.process('mve.decode', extra_args: '--decode=disas_mve'),
+ decodetree.process('a32.decode', extra_args: '--static-decode=disas_a32'),
+ decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'),
+ decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'),
+ decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']),
+]
+
+arm_ss.add(gen)
+
+arm_ss.add(files(
+ 'translate.c',
+ 'translate-m-nocp.c',
+ 'translate-mve.c',
+ 'translate-neon.c',
+ 'translate-vfp.c',
+ 'crypto_helper.c',
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'm_helper.c',
+ 'mve_helper.c',
+ 'neon_helper.c',
+ 'op_helper.c',
+ 'tlb_helper.c',
+ 'vec_helper.c',
+))
+
+arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
+ 'translate-a64.c',
+ 'translate-sve.c',
+ 'translate-sme.c',
+ 'helper-a64.c',
+ 'mte_helper.c',
+ 'pauth_helper.c',
+ 'sme_helper.c',
+ 'sve_helper.c',
+))
+
+arm_softmmu_ss.add(files(
+ 'psci.c',
+))
diff --git a/target/arm/mte_helper.c b/target/arm/tcg/mte_helper.c
index 98bcf59c22..98bcf59c22 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
diff --git a/target/arm/mve.decode b/target/arm/tcg/mve.decode
index 14a4f39802..14a4f39802 100644
--- a/target/arm/mve.decode
+++ b/target/arm/tcg/mve.decode
diff --git a/target/arm/mve_helper.c b/target/arm/tcg/mve_helper.c
index 403b345ea3..403b345ea3 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
diff --git a/target/arm/neon-dp.decode b/target/arm/tcg/neon-dp.decode
index fd3a01bfa0..fd3a01bfa0 100644
--- a/target/arm/neon-dp.decode
+++ b/target/arm/tcg/neon-dp.decode
diff --git a/target/arm/neon-ls.decode b/target/arm/tcg/neon-ls.decode
index c5f364cbc0..c5f364cbc0 100644
--- a/target/arm/neon-ls.decode
+++ b/target/arm/tcg/neon-ls.decode
diff --git a/target/arm/neon-shared.decode b/target/arm/tcg/neon-shared.decode
index 8e6bd0b61f..8e6bd0b61f 100644
--- a/target/arm/neon-shared.decode
+++ b/target/arm/tcg/neon-shared.decode
diff --git a/target/arm/neon_helper.c b/target/arm/tcg/neon_helper.c
index bc6c4a54e9..bc6c4a54e9 100644
--- a/target/arm/neon_helper.c
+++ b/target/arm/tcg/neon_helper.c
diff --git a/target/arm/op_helper.c b/target/arm/tcg/op_helper.c
index 3baf8004f6..3baf8004f6 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/tcg/op_helper.c
diff --git a/target/arm/pauth_helper.c b/target/arm/tcg/pauth_helper.c
index d0483bf051..d0483bf051 100644
--- a/target/arm/pauth_helper.c
+++ b/target/arm/tcg/pauth_helper.c
diff --git a/target/arm/psci.c b/target/arm/tcg/psci.c
index 6c1239bb96..6c1239bb96 100644
--- a/target/arm/psci.c
+++ b/target/arm/tcg/psci.c
diff --git a/target/arm/sme-fa64.decode b/target/arm/tcg/sme-fa64.decode
index 47708ccc8d..47708ccc8d 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/tcg/sme-fa64.decode
diff --git a/target/arm/sme.decode b/target/arm/tcg/sme.decode
index 628804e37a..628804e37a 100644
--- a/target/arm/sme.decode
+++ b/target/arm/tcg/sme.decode
diff --git a/target/arm/sme_helper.c b/target/arm/tcg/sme_helper.c
index 1e67fcac30..1e67fcac30 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/tcg/sme_helper.c
diff --git a/target/arm/sve.decode b/target/arm/tcg/sve.decode
index 14b3a69c36..14b3a69c36 100644
--- a/target/arm/sve.decode
+++ b/target/arm/tcg/sve.decode
diff --git a/target/arm/sve_helper.c b/target/arm/tcg/sve_helper.c
index 521fc9b969..521fc9b969 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
diff --git a/target/arm/t16.decode b/target/arm/tcg/t16.decode
index 646c74929d..646c74929d 100644
--- a/target/arm/t16.decode
+++ b/target/arm/tcg/t16.decode
diff --git a/target/arm/t32.decode b/target/arm/tcg/t32.decode
index f21ad0167a..f21ad0167a 100644
--- a/target/arm/t32.decode
+++ b/target/arm/tcg/t32.decode
diff --git a/target/arm/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 60abcbebe6..31eb77f7df 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -12,24 +12,6 @@
#include "exec/helper-proto.h"
-/* Return true if the translation regime is using LPAE format page tables */
-bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- int el = regime_el(env, mmu_idx);
- if (el == 2 || arm_el_is_aa64(env, el)) {
- return true;
- }
- if (arm_feature(env, ARM_FEATURE_PMSA) &&
- arm_feature(env, ARM_FEATURE_V8)) {
- return true;
- }
- if (arm_feature(env, ARM_FEATURE_LPAE)
- && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
- return true;
- }
- return false;
-}
-
/*
* Returns true if the stage 1 translation regime is using LPAE format page
* tables. Used when raising alignment exceptions, whose FSR changes depending
diff --git a/target/arm/translate-a64.c b/target/arm/tcg/translate-a64.c
index da9f877476..da9f877476 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
diff --git a/target/arm/translate-a64.h b/target/arm/tcg/translate-a64.h
index ad3762d1ac..ad3762d1ac 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
diff --git a/target/arm/translate-m-nocp.c b/target/arm/tcg/translate-m-nocp.c
index 5df7d46120..5df7d46120 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/tcg/translate-m-nocp.c
diff --git a/target/arm/translate-mve.c b/target/arm/tcg/translate-mve.c
index db7ea3f603..db7ea3f603 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/tcg/translate-mve.c
diff --git a/target/arm/translate-neon.c b/target/arm/tcg/translate-neon.c
index 4016339d46..4016339d46 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/tcg/translate-neon.c
diff --git a/target/arm/translate-sme.c b/target/arm/tcg/translate-sme.c
index 7b87a9df63..7b87a9df63 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/tcg/translate-sme.c
diff --git a/target/arm/translate-sve.c b/target/arm/tcg/translate-sve.c
index 621a2abb22..621a2abb22 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
diff --git a/target/arm/translate-vfp.c b/target/arm/tcg/translate-vfp.c
index 5c5d58d2c6..5c5d58d2c6 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/tcg/translate-vfp.c
diff --git a/target/arm/translate.c b/target/arm/tcg/translate.c
index c23a3462bf..c23a3462bf 100644
--- a/target/arm/translate.c
+++ b/target/arm/tcg/translate.c
diff --git a/target/arm/translate.h b/target/arm/tcg/translate.h
index 3717824b75..3717824b75 100644
--- a/target/arm/translate.h
+++ b/target/arm/tcg/translate.h
diff --git a/target/arm/vec_helper.c b/target/arm/tcg/vec_helper.c
index f59d3b26ea..f59d3b26ea 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
diff --git a/target/arm/vec_internal.h b/target/arm/tcg/vec_internal.h
index 1f4ed80ff7..1f4ed80ff7 100644
--- a/target/arm/vec_internal.h
+++ b/target/arm/tcg/vec_internal.h
diff --git a/target/arm/vfp-uncond.decode b/target/arm/tcg/vfp-uncond.decode
index 5c50447a66..5c50447a66 100644
--- a/target/arm/vfp-uncond.decode
+++ b/target/arm/tcg/vfp-uncond.decode
diff --git a/target/arm/vfp.decode b/target/arm/tcg/vfp.decode
index 5405e80197..5405e80197 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/tcg/vfp.decode
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 0dd2f0c753..93b52b826c 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -73,7 +73,7 @@ struct isa_ext_data {
*/
static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
- ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_12_0, ext_v),
+ ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index ad8d82662c..3a9472a2ff 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -60,7 +60,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
* which is not supported by GVEC. So we set the vl_eq_vlmax flag to true
* only when maxsz >= 8 bytes.
*/
- uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
+ uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
uint32_t maxsz = vlmax << sew;
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index fa17d7770c..1b0a0c1693 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -3980,20 +3980,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_FRM] = { "frm", fs, read_frm, write_frm },
[CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
/* Vector CSRs */
- [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_VL] = { "vl", vs, read_vl,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_VTYPE] = { "vtype", vs, read_vtype,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_VLENB] = { "vlenb", vs, read_vlenb,
- .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
+ [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
+ [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
+ [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
+ [CSR_VL] = { "vl", vs, read_vl },
+ [CSR_VTYPE] = { "vtype", vs, read_vtype },
+ [CSR_VLENB] = { "vlenb", vs, read_vlenb },
/* User Timers and Counters */
[CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
[CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index d1126a6066..4bc4113531 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -441,9 +441,12 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
}
}
- if ((privs & *allowed_privs) == privs) {
- ret = i;
- }
+ /*
+ * If a matching address range was found, the protection bits
+ * defined by that PMP entry must be used. We shouldn't fall back
+ * on finding default privileges.
+ */
+ ret = i;
break;
}
}
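The hunk above changes PMP matching so that the first (lowest-numbered) entry whose range matches decides the access outcome, with no fallback to default privileges. A minimal standalone sketch of that priority-match rule follows; the demo_* names and the simplified base/size range format are invented:

/*
 * Illustrative sketch (not QEMU code): the lowest-numbered PMP entry whose
 * address range matches decides the access rights, even if it denies the
 * access; there is no fall-through to a later entry or to default
 * privileges.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_READ  1u
#define DEMO_WRITE 2u

typedef struct {
    uint64_t base, size;   /* matched address range */
    unsigned allowed;      /* bitmask of permitted access types */
} DemoPmpEntry;

/* Returns the index of the deciding entry, or -1 if none matches. */
static int demo_pmp_check(const DemoPmpEntry *e, int n,
                          uint64_t addr, unsigned privs, bool *ok)
{
    for (int i = 0; i < n; i++) {
        if (addr >= e[i].base && addr < e[i].base + e[i].size) {
            /* The first match decides -- no fallback to later entries. */
            *ok = (privs & e[i].allowed) == privs;
            return i;
        }
    }
    *ok = false;           /* no match: the caller applies default policy */
    return -1;
}

int main(void)
{
    DemoPmpEntry entries[] = {
        { 0x1000, 0x1000, DEMO_READ },               /* read-only */
        { 0x0000, 0x10000, DEMO_READ | DEMO_WRITE }, /* would allow more */
    };
    bool ok;
    int idx = demo_pmp_check(entries, 2, 0x1800, DEMO_WRITE, &ok);

    /* Prints "entry 0 decides, allowed=0": the write is denied. */
    printf("entry %d decides, allowed=%d\n", idx, ok);
    return 0;
}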
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 00de879787..3073c54871 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -5038,7 +5038,7 @@ GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4)
GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8)
#define GEN_VEXT_VSLIE1UP(BITWIDTH, H) \
-static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
+static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
typedef uint##BITWIDTH##_t ETYPE; \
@@ -5086,7 +5086,7 @@ GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, 32)
GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, 64)
#define GEN_VEXT_VSLIDE1DOWN(BITWIDTH, H) \
-static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
+static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
typedef uint##BITWIDTH##_t ETYPE; \
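Widening the scalar parameter to uint64_t matters on RV32, where target_ulong is 32 bits and would silently truncate a 64-bit element value. A small sketch of the truncation the prototype change avoids, with uint32_t standing in for a 32-bit target_ulong and the demo_* names invented:

/*
 * Illustrative sketch (not QEMU code): why the slide helpers now take the
 * scalar as uint64_t instead of a target-sized integer.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_slide_narrow(uint32_t s1)   /* truncating prototype */
{
    return s1;
}

static uint64_t demo_slide_wide(uint64_t s1)     /* widened prototype */
{
    return s1;
}

int main(void)
{
    uint64_t elem = 0x1122334455667788ULL;

    /* The narrow version loses the upper 32 bits of the element. */
    printf("narrow: 0x%016" PRIx64 "\n", demo_slide_narrow(elem));
    printf("wide:   0x%016" PRIx64 "\n", demo_slide_wide(elem));
    return 0;
}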
diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c
index a2329141e8..cb98f4894d 100644
--- a/target/s390x/arch_dump.c
+++ b/target/s390x/arch_dump.c
@@ -227,28 +227,28 @@ static int s390x_write_elf64_notes(const char *note_name,
DumpState *s,
const NoteFuncDesc *funcs)
{
- Note note, *notep;
+ g_autofree Note *notep = NULL;
const NoteFuncDesc *nf;
- int note_size, content_size;
+ int note_size, prev_size = 0, content_size;
int ret = -1;
- assert(strlen(note_name) < sizeof(note.name));
+ assert(strlen(note_name) < sizeof(notep->name));
for (nf = funcs; nf->note_contents_func; nf++) {
- notep = &note;
if (nf->pvonly && !s390_is_pv()) {
continue;
}
content_size = nf->note_size_func ? nf->note_size_func() : nf->contents_size;
- note_size = sizeof(note) - sizeof(notep->contents) + content_size;
+ note_size = sizeof(Note) - sizeof(notep->contents) + content_size;
- /* Notes with dynamic sizes need to allocate a note */
- if (nf->note_size_func) {
+ if (prev_size < note_size) {
+ g_free(notep);
notep = g_malloc(note_size);
+ prev_size = note_size;
}
- memset(notep, 0, sizeof(note));
+ memset(notep, 0, note_size);
/* Setup note header data */
notep->hdr.n_descsz = cpu_to_be32(content_size);
@@ -258,15 +258,9 @@ static int s390x_write_elf64_notes(const char *note_name,
/* Get contents and write them out */
(*nf->note_contents_func)(notep, cpu, id);
ret = f(notep, note_size, s);
-
- if (nf->note_size_func) {
- g_free(notep);
- }
-
if (ret < 0) {
return -1;
}
-
}
return 0;
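The rewritten loop above keeps a single g_autofree note buffer alive across iterations and reallocates only when a larger note is needed. A minimal sketch of that grow-only reuse pattern, assuming GLib; the demo sizes and demo_write() are invented:

/*
 * Illustrative sketch (not QEMU code): keep one g_autofree allocation
 * across loop iterations and reallocate only when the next item needs
 * more space.
 */
#include <glib.h>
#include <stdio.h>
#include <string.h>

static void demo_write(const void *buf, size_t len)
{
    (void)buf;
    printf("wrote %zu bytes\n", len);
}

int main(void)
{
    g_autofree char *buf = NULL;   /* freed automatically on scope exit */
    size_t prev_size = 0;
    const size_t sizes[] = { 64, 32, 128, 96 };

    for (size_t i = 0; i < G_N_ELEMENTS(sizes); i++) {
        size_t need = sizes[i];

        if (prev_size < need) {    /* grow only when necessary */
            g_free(buf);
            buf = g_malloc(need);
            prev_size = need;
        }
        memset(buf, 0, need);      /* clear only the bytes that will be used */
        demo_write(buf, need);
    }
    return 0;
}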
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index d6725fd18c..e51a0db0fe 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -35,6 +35,12 @@
#include "hw/boards.h"
#endif
+#ifdef CONFIG_USER_ONLY
+# define user_or_likely(X) true
+#else
+# define user_or_likely(X) likely(X)
+#endif
+
/*****************************************************************************/
/* Softmmu support */
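The new user_or_likely() macro lets one code path serve both builds: in the user-only configuration the test folds to a compile-time true and the slow branch becomes dead code, while the system build keeps a branch-prediction hint. A standalone sketch of the same idea; CONFIG_DEMO_USER_ONLY and the demo_* names are invented:

/*
 * Illustrative sketch (not QEMU code): a user_or_likely()-style macro
 * shared by two build configurations.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* #define CONFIG_DEMO_USER_ONLY 1 */

#ifdef CONFIG_DEMO_USER_ONLY
# define user_or_likely(X) true
#else
# define user_or_likely(X) __builtin_expect(!!(X), 1)
#endif

static void demo_store_byte(char *host_addr, size_t off, char byte)
{
    if (user_or_likely(host_addr)) {
        host_addr[off] = byte;          /* fast path: direct host access */
    } else {
        /* Slow path: the real code would go through an MMU helper here. */
        fprintf(stderr, "slow store at offset %zu\n", off);
    }
}

int main(void)
{
    char page[16];

    memset(page, 0, sizeof(page));
    demo_store_byte(page, 3, 'x');
    demo_store_byte(NULL, 3, 'x');      /* only reachable in the system build */
    printf("page[3] = %c\n", page[3]);
    return 0;
}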
@@ -114,19 +120,15 @@ static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
typedef struct S390Access {
target_ulong vaddr1;
target_ulong vaddr2;
- char *haddr1;
- char *haddr2;
+ void *haddr1;
+ void *haddr2;
uint16_t size1;
uint16_t size2;
/*
* If we can't access the host page directly, we'll have to do I/O access
* via ld/st helpers. These are internal details, so we store the
* mmu idx to do the access here instead of passing it around in the
- * helpers. Maybe, one day we can get rid of ld/st access - once we can
- * handle TLB_NOTDIRTY differently. We don't expect these special accesses
- * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP
- * pages, we might trigger a new MMU translation - very unlikely that
- * the mapping changes in between and we would trigger a fault.
+ * helpers.
*/
int mmu_idx;
} S390Access;
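The S390Access fields describe an access split across at most two pages: size1 bytes up to the first page boundary and size2 bytes on the following page. A small sketch of that split calculation; DEMO_PAGE_SIZE and the demo values are invented:

/*
 * Illustrative sketch (not QEMU code): splitting an access that may cross
 * a page boundary into two pieces.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_PAGE_MASK (~(uint64_t)(DEMO_PAGE_SIZE - 1))

int main(void)
{
    uint64_t vaddr = 0x1ff0;   /* 16 bytes before a page boundary */
    unsigned size = 40;

    /* Bytes remaining on the first page, capped at the total size. */
    unsigned size1 = (unsigned)(DEMO_PAGE_SIZE - (vaddr & ~DEMO_PAGE_MASK));
    if (size1 > size) {
        size1 = size;
    }
    unsigned size2 = size - size1;              /* spills onto the next page */
    uint64_t vaddr2 = (vaddr & DEMO_PAGE_MASK) + DEMO_PAGE_SIZE;

    printf("piece 1: %u bytes at 0x%" PRIx64 "\n", size1, vaddr);
    if (size2) {
        printf("piece 2: %u bytes at 0x%" PRIx64 "\n", size2, vaddr2);
    }
    return 0;
}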
@@ -138,23 +140,27 @@ typedef struct S390Access {
 * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
* For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
*/
-static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t ra)
+static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
+ int size, MMUAccessType access_type,
+ int mmu_idx, bool nonfault,
+ void **phost, uintptr_t ra)
{
-#if defined(CONFIG_USER_ONLY)
- return probe_access_flags(env, addr, access_type, mmu_idx,
- nonfault, phost, ra);
-#else
- int flags;
+ int flags = probe_access_flags(env, addr, access_type, mmu_idx,
+ nonfault, phost, ra);
- env->tlb_fill_exc = 0;
- flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
- ra);
- if (env->tlb_fill_exc) {
+ if (unlikely(flags & TLB_INVALID_MASK)) {
+ assert(!nonfault);
+#ifdef CONFIG_USER_ONLY
+ /* Address is in TEC in system mode; see s390_cpu_record_sigsegv. */
+ env->__excp_addr = addr & TARGET_PAGE_MASK;
+ return (page_get_flags(addr) & PAGE_VALID
+ ? PGM_PROTECTION : PGM_ADDRESSING);
+#else
return env->tlb_fill_exc;
+#endif
}
+#ifndef CONFIG_USER_ONLY
if (unlikely(flags & TLB_WATCHPOINT)) {
/* S390 does not presently use transaction attributes. */
cpu_check_watchpoint(env_cpu(env), addr, size,
@@ -162,8 +168,9 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
(access_type == MMU_DATA_STORE
? BP_MEM_WRITE : BP_MEM_READ), ra);
}
- return 0;
#endif
+
+ return 0;
}
static int access_prepare_nf(S390Access *access, CPUS390XState *env,
@@ -171,51 +178,46 @@ static int access_prepare_nf(S390Access *access, CPUS390XState *env,
MMUAccessType access_type,
int mmu_idx, uintptr_t ra)
{
- void *haddr1, *haddr2 = NULL;
int size1, size2, exc;
- vaddr vaddr2 = 0;
assert(size > 0 && size <= 4096);
size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)),
size2 = size - size1;
+ memset(access, 0, sizeof(*access));
+ access->vaddr1 = vaddr1;
+ access->size1 = size1;
+ access->size2 = size2;
+ access->mmu_idx = mmu_idx;
+
exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
- &haddr1, ra);
- if (exc) {
+ &access->haddr1, ra);
+ if (unlikely(exc)) {
return exc;
}
if (unlikely(size2)) {
/* The access crosses page boundaries. */
- vaddr2 = wrap_address(env, vaddr1 + size1);
+ vaddr vaddr2 = wrap_address(env, vaddr1 + size1);
+
+ access->vaddr2 = vaddr2;
exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
- nonfault, &haddr2, ra);
- if (exc) {
+ nonfault, &access->haddr2, ra);
+ if (unlikely(exc)) {
return exc;
}
}
-
- *access = (S390Access) {
- .vaddr1 = vaddr1,
- .vaddr2 = vaddr2,
- .haddr1 = haddr1,
- .haddr2 = haddr2,
- .size1 = size1,
- .size2 = size2,
- .mmu_idx = mmu_idx
- };
return 0;
}
-static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
- MMUAccessType access_type, int mmu_idx,
- uintptr_t ra)
+static inline void access_prepare(S390Access *ret, CPUS390XState *env,
+ vaddr vaddr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t ra)
{
- S390Access ret;
- int exc = access_prepare_nf(&ret, env, false, vaddr, size,
+ int exc = access_prepare_nf(ret, env, false, vaddr, size,
access_type, mmu_idx, ra);
assert(!exc);
- return ret;
}
/* Helper to handle memset on a single page. */
@@ -224,28 +226,14 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
- g_assert(haddr);
memset(haddr, byte, size);
#else
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- int i;
-
if (likely(haddr)) {
memset(haddr, byte, size);
} else {
- /*
- * Do a single access and test if we can then get access to the
- * page. This is especially relevant to speed up TLB_NOTDIRTY.
- */
- g_assert(size > 0);
- cpu_stb_mmu(env, vaddr, byte, oi, ra);
- haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
- if (likely(haddr)) {
- memset(haddr + 1, byte, size - 1);
- } else {
- for (i = 1; i < size; i++) {
- cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
- }
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ for (int i = 0; i < size; i++) {
+ cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
}
}
#endif
@@ -264,70 +252,43 @@ static void access_memset(CPUS390XState *env, S390Access *desta,
desta->mmu_idx, ra);
}
-static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
- int offset, int mmu_idx, uintptr_t ra)
-{
-#ifdef CONFIG_USER_ONLY
- return ldub_p(*haddr + offset);
-#else
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- uint8_t byte;
-
- if (likely(*haddr)) {
- return ldub_p(*haddr + offset);
- }
- /*
- * Do a single access and test if we can then get access to the
- * page. This is especially relevant to speed up TLB_NOTDIRTY.
- */
- byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
- *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
- return byte;
-#endif
-}
-
static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
int offset, uintptr_t ra)
{
- if (offset < access->size1) {
- return do_access_get_byte(env, access->vaddr1, &access->haddr1,
- offset, access->mmu_idx, ra);
- }
- return do_access_get_byte(env, access->vaddr2, &access->haddr2,
- offset - access->size1, access->mmu_idx, ra);
-}
+ target_ulong vaddr = access->vaddr1;
+ void *haddr = access->haddr1;
-static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
- int offset, uint8_t byte, int mmu_idx,
- uintptr_t ra)
-{
-#ifdef CONFIG_USER_ONLY
- stb_p(*haddr + offset, byte);
-#else
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ if (unlikely(offset >= access->size1)) {
+ offset -= access->size1;
+ vaddr = access->vaddr2;
+ haddr = access->haddr2;
+ }
- if (likely(*haddr)) {
- stb_p(*haddr + offset, byte);
- return;
+ if (user_or_likely(haddr)) {
+ return ldub_p(haddr + offset);
+ } else {
+ MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx);
+ return cpu_ldb_mmu(env, vaddr + offset, oi, ra);
}
- /*
- * Do a single access and test if we can then get access to the
- * page. This is especially relevant to speed up TLB_NOTDIRTY.
- */
- cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
- *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
-#endif
}
static void access_set_byte(CPUS390XState *env, S390Access *access,
int offset, uint8_t byte, uintptr_t ra)
{
- if (offset < access->size1) {
- do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
- access->mmu_idx, ra);
+ target_ulong vaddr = access->vaddr1;
+ void *haddr = access->haddr1;
+
+ if (unlikely(offset >= access->size1)) {
+ offset -= access->size1;
+ vaddr = access->vaddr2;
+ haddr = access->haddr2;
+ }
+
+ if (user_or_likely(haddr)) {
+ stb_p(haddr + offset, byte);
} else {
- do_access_set_byte(env, access->vaddr2, &access->haddr2,
- offset - access->size1, byte, access->mmu_idx, ra);
+ MemOpIdx oi = make_memop_idx(MO_UB, access->mmu_idx);
+ cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
}
}
@@ -338,16 +299,17 @@ static void access_set_byte(CPUS390XState *env, S390Access *access,
static void access_memmove(CPUS390XState *env, S390Access *desta,
S390Access *srca, uintptr_t ra)
{
+ int len = desta->size1 + desta->size2;
int diff;
- g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);
+ assert(len == srca->size1 + srca->size2);
/* Fall back to slow access in case we don't have access to all host pages */
if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
!srca->haddr1 || (srca->size2 && !srca->haddr2))) {
int i;
- for (i = 0; i < desta->size1 + desta->size2; i++) {
+ for (i = 0; i < len; i++) {
uint8_t byte = access_get_byte(env, srca, i, ra);
access_set_byte(env, desta, i, byte, ra);
@@ -355,20 +317,20 @@ static void access_memmove(CPUS390XState *env, S390Access *desta,
return;
}
- if (srca->size1 == desta->size1) {
+ diff = desta->size1 - srca->size1;
+ if (likely(diff == 0)) {
memmove(desta->haddr1, srca->haddr1, srca->size1);
if (unlikely(srca->size2)) {
memmove(desta->haddr2, srca->haddr2, srca->size2);
}
- } else if (srca->size1 < desta->size1) {
- diff = desta->size1 - srca->size1;
+ } else if (diff > 0) {
memmove(desta->haddr1, srca->haddr1, srca->size1);
memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
if (likely(desta->size2)) {
memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
}
} else {
- diff = srca->size1 - desta->size1;
+ diff = -diff;
memmove(desta->haddr1, srca->haddr1, desta->size1);
memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
if (likely(srca->size2)) {
@@ -407,9 +369,9 @@ static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* NC always processes one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) &
access_get_byte(env, &srca2, i, ra);
@@ -441,9 +403,9 @@ static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* XC always processes one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
/* xor with itself is the same as memset(0) */
if (src == dest) {
@@ -482,9 +444,9 @@ static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* OC always processes one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca1, i, ra) |
access_get_byte(env, &srca2, i, ra);
@@ -515,8 +477,8 @@ static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
/* MVC always copies one more byte than specified - maximum is 256 */
l++;
- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
/*
* "When the operands overlap, the result is obtained as if the operands
@@ -554,8 +516,8 @@ void HELPER(mvcrl)(CPUS390XState *env, uint64_t l, uint64_t dest, uint64_t src)
/* MVCRL always copies one more byte than specified - maximum is 256 */
l++;
- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = l - 1; i >= 0; i--) {
uint8_t byte = access_get_byte(env, &srca, i, ra);
@@ -575,8 +537,8 @@ void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
l++;
src = wrap_address(env, src - l + 1);
- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);
@@ -595,9 +557,9 @@ void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
/* MVN always copies one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
(access_get_byte(env, &srca2, i, ra) & 0xf0);
@@ -618,8 +580,8 @@ void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
S390Access srca, desta;
int i, j;
- srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);
/* Handle rightmost byte */
byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
@@ -651,9 +613,9 @@ void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
/* MVZ always copies one more byte than specified - maximum is 256 */
l++;
- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca1, env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&srca2, env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < l; i++) {
const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
(access_get_byte(env, &srca2, i, ra) & 0x0f);
@@ -997,8 +959,8 @@ uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
* this point). We might over-indicate watchpoints within the pages
* (if we ever care, we have to limit processing to a single byte).
*/
- srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, d, len, MMU_DATA_STORE, mmu_idx, ra);
for (i = 0; i < len; i++) {
const uint8_t v = access_get_byte(env, &srca, i, ra);
@@ -1085,19 +1047,19 @@ static inline uint32_t do_mvcl(CPUS390XState *env,
len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
*destlen -= len;
*srclen -= len;
- srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&srca, env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
access_memmove(env, &desta, &srca, ra);
*src = wrap_address(env, *src + len);
*dest = wrap_address(env, *dest + len);
} else if (wordsize == 1) {
/* Pad the remaining area */
*destlen -= len;
- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
access_memset(env, &desta, pad, ra);
*dest = wrap_address(env, *dest + len);
} else {
- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_prepare(&desta, env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
/* The remaining length selects the padding byte. */
for (i = 0; i < len; (*destlen)--, i++) {
@@ -1153,16 +1115,16 @@ uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
while (destlen) {
cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
if (!srclen) {
- desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
- ra);
+ access_prepare(&desta, env, dest, cur_len,
+ MMU_DATA_STORE, mmu_idx, ra);
access_memset(env, &desta, pad, ra);
} else {
cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
- srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
- ra);
- desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
- ra);
+ access_prepare(&srca, env, src, cur_len,
+ MMU_DATA_LOAD, mmu_idx, ra);
+ access_prepare(&desta, env, dest, cur_len,
+ MMU_DATA_STORE, mmu_idx, ra);
access_memmove(env, &desta, &srca, ra);
src = wrap_address(env, src + cur_len);
srclen -= cur_len;
@@ -2267,8 +2229,8 @@ uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2,
return cc;
}
- srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
- desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
+ access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
+ access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
access_memmove(env, &desta, &srca, ra);
return cc;
}
@@ -2301,9 +2263,8 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2,
} else if (!l) {
return cc;
}
-
- srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
- desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
+ access_prepare(&srca, env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
+ access_prepare(&desta, env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
access_memmove(env, &desta, &srca, ra);
return cc;
}
@@ -2644,10 +2605,12 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
/* FIXME: Access using correct keys and AR-mode */
if (len) {
- S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
- mmu_idx_from_as(src_as), ra);
- S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
- mmu_idx_from_as(dest_as), ra);
+ S390Access srca, desta;
+
+ access_prepare(&srca, env, src, len, MMU_DATA_LOAD,
+ mmu_idx_from_as(src_as), ra);
+ access_prepare(&desta, env, dest, len, MMU_DATA_STORE,
+ mmu_idx_from_as(dest_as), ra);
access_memmove(env, &desta, &srca, ra);
}
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index ac5bd98f04..faa6f737ba 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -171,8 +171,6 @@ static uint64_t inline_branch_miss[CC_OP_MAX];
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
- TCGv_i64 tmp;
-
if (s->base.tb->flags & FLAG_MASK_32) {
if (s->base.tb->flags & FLAG_MASK_64) {
tcg_gen_movi_i64(out, pc);
@@ -181,9 +179,7 @@ static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
pc |= 0x80000000;
}
assert(!(s->base.tb->flags & FLAG_MASK_64));
- tmp = tcg_const_i64(pc);
- tcg_gen_deposit_i64(out, out, tmp, 0, 32);
- tcg_temp_free_i64(tmp);
+ tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
static TCGv_i64 psw_addr;
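The tcg_const_* to tcg_constant_* conversions that run through this file change ownership: a tcg_constant_* value is a shared, read-only constant that must not be freed or overwritten, which is why the matching tcg_temp_free_* calls disappear. A rough standalone analogy (not TCG code; the demo_* names are invented) of interned constants versus caller-owned temporaries:

/*
 * Illustrative sketch (not QEMU/TCG code): shared constants from a pool
 * are never freed; caller-owned temporaries must be released.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Shared, read-only constants: analogous to tcg_constant_i32(). */
static const int32_t *demo_constant_i32(int32_t v)
{
    static int32_t pool[16];
    static int n;

    for (int i = 0; i < n; i++) {
        if (pool[i] == v) {
            return &pool[i];      /* reuse the interned value */
        }
    }
    assert(n < (int)(sizeof(pool) / sizeof(pool[0])));
    pool[n] = v;
    return &pool[n++];            /* the caller must NOT free this */
}

/* Caller-owned temporary: analogous to the old tcg_const_i32(). */
static int32_t *demo_temp_i32(int32_t v)
{
    int32_t *p = malloc(sizeof(*p));

    assert(p);
    *p = v;
    return p;                     /* the caller must free this */
}

int main(void)
{
    const int32_t *c = demo_constant_i32(3);   /* no free needed */
    int32_t *t = demo_temp_i32(3);             /* must be freed */

    printf("constant=%d temp=%d\n", (int)*c, *t);
    free(t);
    return 0;
}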
@@ -360,11 +356,8 @@ static void per_branch(DisasContext *s, bool to_next)
tcg_gen_movi_i64(gbea, s->base.pc_next);
if (s->base.tb->flags & FLAG_MASK_PER) {
- TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
+ TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
gen_helper_per_branch(cpu_env, gbea, next_pc);
- if (to_next) {
- tcg_temp_free_i64(next_pc);
- }
}
#endif
}
@@ -382,9 +375,8 @@ static void per_branch_cond(DisasContext *s, TCGCond cond,
gen_set_label(lab);
} else {
- TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
+ TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
- tcg_temp_free_i64(pc);
}
#endif
}
@@ -438,23 +430,17 @@ static int get_mem_index(DisasContext *s)
static void gen_exception(int excp)
{
- TCGv_i32 tmp = tcg_const_i32(excp);
- gen_helper_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}
static void gen_program_exception(DisasContext *s, int code)
{
- TCGv_i32 tmp;
-
- /* Remember what pgm exception this was. */
- tmp = tcg_const_i32(code);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
- tcg_temp_free_i32(tmp);
+ /* Remember what pgm exception this was. */
+ tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
+ offsetof(CPUS390XState, int_pgm_code));
- tmp = tcg_const_i32(s->ilen);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
- tcg_temp_free_i32(tmp);
+ tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
+ offsetof(CPUS390XState, int_pgm_ilen));
/* update the psw */
update_psw_addr(s);
@@ -473,9 +459,7 @@ static inline void gen_illegal_opcode(DisasContext *s)
static inline void gen_data_exception(uint8_t dxc)
{
- TCGv_i32 tmp = tcg_const_i32(dxc);
- gen_helper_data_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}
static inline void gen_trap(DisasContext *s)
@@ -596,13 +580,13 @@ static void gen_op_calc_cc(DisasContext *s)
switch (s->cc_op) {
default:
- dummy = tcg_const_i64(0);
+ dummy = tcg_constant_i64(0);
/* FALLTHRU */
case CC_OP_ADD_64:
case CC_OP_SUB_64:
case CC_OP_ADD_32:
case CC_OP_SUB_32:
- local_cc_op = tcg_const_i32(s->cc_op);
+ local_cc_op = tcg_constant_i32(s->cc_op);
break;
case CC_OP_CONST0:
case CC_OP_CONST1:
@@ -675,13 +659,6 @@ static void gen_op_calc_cc(DisasContext *s)
tcg_abort();
}
- if (local_cc_op) {
- tcg_temp_free_i32(local_cc_op);
- }
- if (dummy) {
- tcg_temp_free_i64(dummy);
- }
-
/* We now have cc in cc_op as constant */
set_cc_static(s);
}
@@ -868,7 +845,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->is_64 = false;
c->u.s32.a = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
break;
case CC_OP_LTGT_32:
case CC_OP_LTUGTU_32:
@@ -883,7 +860,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_NZ:
case CC_OP_FLOGR:
c->u.s64.a = cc_dst;
- c->u.s64.b = tcg_const_i64(0);
+ c->u.s64.b = tcg_constant_i64(0);
c->g1 = true;
break;
case CC_OP_LTGT_64:
@@ -897,14 +874,14 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_TM_64:
case CC_OP_ICM:
c->u.s64.a = tcg_temp_new_i64();
- c->u.s64.b = tcg_const_i64(0);
+ c->u.s64.b = tcg_constant_i64(0);
tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
break;
case CC_OP_ADDU:
case CC_OP_SUBU:
c->is_64 = true;
- c->u.s64.b = tcg_const_i64(0);
+ c->u.s64.b = tcg_constant_i64(0);
c->g1 = true;
switch (mask) {
case 8 | 2:
@@ -927,65 +904,65 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
switch (mask) {
case 0x8 | 0x4 | 0x2: /* cc != 3 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(3);
+ c->u.s32.b = tcg_constant_i32(3);
break;
case 0x8 | 0x4 | 0x1: /* cc != 2 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(2);
+ c->u.s32.b = tcg_constant_i32(2);
break;
case 0x8 | 0x2 | 0x1: /* cc != 1 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(1);
+ c->u.s32.b = tcg_constant_i32(1);
break;
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
cond = TCG_COND_EQ;
c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
break;
case 0x8 | 0x4: /* cc < 2 */
cond = TCG_COND_LTU;
- c->u.s32.b = tcg_const_i32(2);
+ c->u.s32.b = tcg_constant_i32(2);
break;
case 0x8: /* cc == 0 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
break;
case 0x4 | 0x2 | 0x1: /* cc != 0 */
cond = TCG_COND_NE;
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_NE;
c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_const_i32(0);
+ c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
break;
case 0x4: /* cc == 1 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(1);
+ c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2 | 0x1: /* cc > 1 */
cond = TCG_COND_GTU;
- c->u.s32.b = tcg_const_i32(1);
+ c->u.s32.b = tcg_constant_i32(1);
break;
case 0x2: /* cc == 2 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(2);
+ c->u.s32.b = tcg_constant_i32(2);
break;
case 0x1: /* cc == 3 */
cond = TCG_COND_EQ;
- c->u.s32.b = tcg_const_i32(3);
+ c->u.s32.b = tcg_constant_i32(3);
break;
default:
/* CC is masked by something else: (8 >> cc) & mask. */
cond = TCG_COND_NE;
c->g1 = false;
- c->u.s32.a = tcg_const_i32(8);
- c->u.s32.b = tcg_const_i32(0);
- tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_constant_i32(0);
+ tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
break;
}
@@ -1300,9 +1277,9 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
Most commonly we're single-stepping or some other condition that
disables all use of goto_tb. Just update the PC and exit. */
- TCGv_i64 next = tcg_const_i64(s->pc_tmp);
+ TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
if (is_imm) {
- cdest = tcg_const_i64(dest);
+ cdest = tcg_constant_i64(dest);
}
if (c->is_64) {
@@ -1312,21 +1289,15 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 z = tcg_const_i64(0);
+ TCGv_i64 z = tcg_constant_i64(0);
tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
tcg_gen_extu_i32_i64(t1, t0);
tcg_temp_free_i32(t0);
tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
per_branch_cond(s, TCG_COND_NE, t1, z);
tcg_temp_free_i64(t1);
- tcg_temp_free_i64(z);
}
- if (is_imm) {
- tcg_temp_free_i64(cdest);
- }
- tcg_temp_free_i64(next);
-
ret = DISAS_PC_UPDATED;
}
@@ -1410,10 +1381,9 @@ static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
compute_carry(s);
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
- tcg_temp_free_i64(zero);
return DISAS_NEXT;
}
@@ -1649,7 +1619,7 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
tcg_gen_subi_i64(t, regs[r1], 1);
store_reg32_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
- c.u.s32.b = tcg_const_i32(0);
+ c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);
@@ -1673,7 +1643,7 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
tcg_gen_subi_i64(t, t, 1);
store_reg32h_i64(r1, t);
c.u.s32.a = tcg_temp_new_i32();
- c.u.s32.b = tcg_const_i32(0);
+ c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_temp_free_i64(t);
@@ -1694,7 +1664,7 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
tcg_gen_subi_i64(regs[r1], regs[r1], 1);
c.u.s64.a = regs[r1];
- c.u.s64.b = tcg_const_i64(0);
+ c.u.s64.b = tcg_constant_i64(0);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1820,7 +1790,7 @@ static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
return NULL;
}
- return tcg_const_i32(deposit32(m3, 4, 4, m4));
+ return tcg_constant_i32(deposit32(m3, 4, 4, m4));
}
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
@@ -1831,7 +1801,6 @@ static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1844,7 +1813,6 @@ static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1857,7 +1825,6 @@ static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1870,7 +1837,6 @@ static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1883,7 +1849,6 @@ static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1896,7 +1861,6 @@ static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1909,7 +1873,6 @@ static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1922,7 +1885,6 @@ static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1935,7 +1897,6 @@ static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1948,7 +1909,6 @@ static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1961,7 +1921,6 @@ static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1974,7 +1933,6 @@ static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -1987,7 +1945,6 @@ static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cegb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -1999,7 +1956,6 @@ static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2011,7 +1967,6 @@ static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2023,7 +1978,6 @@ static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_celgb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2035,7 +1989,6 @@ static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2047,7 +2000,6 @@ static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2092,9 +2044,8 @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
break;
default:
- vl = tcg_const_i32(l);
+ vl = tcg_constant_i32(l);
gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
- tcg_temp_free_i32(vl);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2114,11 +2065,9 @@ static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t2 = tcg_const_i32(r2);
+ t1 = tcg_constant_i32(r1);
+ t2 = tcg_constant_i32(r2);
gen_helper_clcl(cc_op, cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2135,11 +2084,9 @@ static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2156,24 +2103,22 @@ static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
- TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
+ TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
TCGv_i32 t1 = tcg_temp_new_i32();
+
tcg_gen_extrl_i64_i32(t1, o->in1);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
tcg_temp_free_i32(t1);
- tcg_temp_free_i32(m3);
return DISAS_NEXT;
}
@@ -2251,14 +2196,13 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
int r3 = get_field(s, r3);
- TCGv_i32 t_r3 = tcg_const_i32(r3);
+ TCGv_i32 t_r3 = tcg_constant_i32(r3);
if (tb_cflags(s->base.tb) & CF_PARALLEL) {
gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
} else {
gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
}
- tcg_temp_free_i32(t_r3);
set_cc_static(s);
return DISAS_NEXT;
@@ -2356,9 +2300,9 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
m3 = 0;
}
- tr1 = tcg_const_i32(r1);
- tr2 = tcg_const_i32(r2);
- chk = tcg_const_i32(m3);
+ tr1 = tcg_constant_i32(r1);
+ tr2 = tcg_constant_i32(r2);
+ chk = tcg_constant_i32(m3);
switch (s->insn->data) {
case 12:
@@ -2383,9 +2327,6 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
g_assert_not_reached();
}
- tcg_temp_free_i32(tr1);
- tcg_temp_free_i32(tr2);
- tcg_temp_free_i32(chk);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -2393,15 +2334,11 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
- TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+ TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
gen_helper_diag(cpu_env, r1, r3, func_code);
-
- tcg_temp_free_i32(func_code);
- tcg_temp_free_i32(r3);
- tcg_temp_free_i32(r1);
return DISAS_NEXT;
}
#endif
@@ -2512,18 +2449,13 @@ static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
update_cc_op(s);
if (r1 == 0) {
- v1 = tcg_const_i64(0);
+ v1 = tcg_constant_i64(0);
} else {
v1 = regs[r1];
}
- ilen = tcg_const_i32(s->ilen);
+ ilen = tcg_constant_i32(s->ilen);
gen_helper_ex(cpu_env, ilen, v1, o->in2);
- tcg_temp_free_i32(ilen);
-
- if (r1 == 0) {
- tcg_temp_free_i64(v1);
- }
return DISAS_PC_CC_UPDATED;
}
@@ -2536,7 +2468,6 @@ static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_fieb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2548,7 +2479,6 @@ static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_fidb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2560,7 +2490,6 @@ static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2674,12 +2603,11 @@ static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
TCGv_i32 m4;
if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
- m4 = tcg_const_i32(get_field(s, m4));
+ m4 = tcg_constant_i32(get_field(s, m4));
} else {
- m4 = tcg_const_i32(0);
+ m4 = tcg_constant_i32(0);
}
gen_helper_idte(cpu_env, o->in1, o->in2, m4);
- tcg_temp_free_i32(m4);
return DISAS_NEXT;
}
@@ -2688,12 +2616,11 @@ static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
TCGv_i32 m4;
if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
- m4 = tcg_const_i32(get_field(s, m4));
+ m4 = tcg_constant_i32(get_field(s, m4));
} else {
- m4 = tcg_const_i32(0);
+ m4 = tcg_constant_i32(0);
}
gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
- tcg_temp_free_i32(m4);
return DISAS_NEXT;
}
@@ -2749,16 +2676,12 @@ static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
g_assert_not_reached();
};
- t_r1 = tcg_const_i32(r1);
- t_r2 = tcg_const_i32(r2);
- t_r3 = tcg_const_i32(r3);
- type = tcg_const_i32(s->insn->data);
+ t_r1 = tcg_constant_i32(r1);
+ t_r2 = tcg_constant_i32(r2);
+ t_r3 = tcg_constant_i32(r3);
+ type = tcg_constant_i32(s->insn->data);
gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
set_cc_static(s);
- tcg_temp_free_i32(t_r1);
- tcg_temp_free_i32(t_r2);
- tcg_temp_free_i32(t_r3);
- tcg_temp_free_i32(type);
return DISAS_NEXT;
}
@@ -2841,7 +2764,6 @@ static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_ledb(o->out, cpu_env, o->in2, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2853,7 +2775,6 @@ static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -2865,7 +2786,6 @@ static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
- tcg_temp_free_i32(m34);
return DISAS_NEXT;
}
@@ -3017,10 +2937,9 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
tcg_gen_extu_i32_i64(t, t32);
tcg_temp_free_i32(t32);
- z = tcg_const_i64(0);
+ z = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
tcg_temp_free_i64(t);
- tcg_temp_free_i64(z);
}
return DISAS_NEXT;
@@ -3029,11 +2948,10 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_lctl(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -3041,11 +2959,10 @@ static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_lctlg(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
/* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
s->exit_to_mainloop = true;
return DISAS_TOO_MANY;
@@ -3105,11 +3022,10 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_lam(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
@@ -3319,9 +3235,6 @@ static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
-#if !defined(CONFIG_USER_ONLY)
- TCGv_i32 i2;
-#endif
const uint16_t monitor_class = get_field(s, i2);
if (monitor_class & 0xff00) {
@@ -3330,9 +3243,8 @@ static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
}
#if !defined(CONFIG_USER_ONLY)
- i2 = tcg_const_i32(monitor_class);
- gen_helper_monitor_call(cpu_env, o->addr1, i2);
- tcg_temp_free_i32(i2);
+ gen_helper_monitor_call(cpu_env, o->addr1,
+ tcg_constant_i32(monitor_class));
#endif
/* Defaults to a NOP. */
return DISAS_NEXT;
@@ -3396,9 +3308,9 @@ static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3410,9 +3322,9 @@ static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3428,11 +3340,9 @@ static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t2 = tcg_const_i32(r2);
+ t1 = tcg_constant_i32(r1);
+ t2 = tcg_constant_i32(r2);
gen_helper_mvcl(cc_op, cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3449,11 +3359,9 @@ static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3470,11 +3378,9 @@ static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- t1 = tcg_const_i32(r1);
- t3 = tcg_const_i32(r3);
+ t1 = tcg_constant_i32(r1);
+ t3 = tcg_constant_i32(r3);
gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t3);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3509,49 +3415,45 @@ static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
- TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
gen_helper_mvst(cc_op, cpu_env, t1, t2);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3637,13 +3539,12 @@ static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
- TCGv_i64 z, n;
- z = tcg_const_i64(0);
- n = tcg_temp_new_i64();
+ TCGv_i64 z = tcg_constant_i64(0);
+ TCGv_i64 n = tcg_temp_new_i64();
+
tcg_gen_neg_i64(n, o->in2);
tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
tcg_temp_free_i64(n);
- tcg_temp_free_i64(z);
return DISAS_NEXT;
}
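
As a plain-C aside (not part of the patch), the movcond in op_nabs above computes the negative absolute value: out = (in2 >= 0) ? -in2 : in2. A minimal standalone sketch, using the illustrative helper name nabs64:

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors tcg_gen_movcond_i64(TCG_COND_GE, out, in2, 0, -in2, in2). */
    static int64_t nabs64(int64_t x)
    {
        return x >= 0 ? -x : x;   /* -x only evaluated when x >= 0, so no overflow */
    }

    int main(void)
    {
        assert(nabs64(5) == -5);
        assert(nabs64(-7) == -7);
        assert(nabs64(INT64_MIN) == INT64_MIN);
        return 0;
    }
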
@@ -3668,9 +3569,9 @@ static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3702,9 +3603,9 @@ static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -3754,9 +3655,9 @@ static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_pack(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3770,9 +3671,8 @@ static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l2);
+ l = tcg_constant_i32(l2);
gen_helper_pka(cpu_env, o->addr1, o->in2, l);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -3786,9 +3686,8 @@ static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l2);
+ l = tcg_constant_i32(l2);
gen_helper_pku(cpu_env, o->addr1, o->in2, l);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -4035,9 +3934,8 @@ static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
}
s->pc_tmp &= mask;
- tsam = tcg_const_i64(sam);
+ tsam = tcg_constant_i64(sam);
tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
- tcg_temp_free_i64(tsam);
/* Always exit the TB, since we (may have) changed execution mode. */
return DISAS_TOO_MANY;
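
For readers unfamiliar with the deposit op used above: tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2) overwrites a two-bit field of psw_mask at bit offset 31 with the low two bits of the constant, leaving the rest untouched. A standalone sketch of the same bit manipulation (deposit_2_at_31 is an illustrative name, not a QEMU function):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t deposit_2_at_31(uint64_t psw_mask, uint64_t sam)
    {
        uint64_t field = 3ULL << 31;                    /* two-bit field at offset 31 */

        return (psw_mask & ~field) | ((sam & 3) << 31);
    }

    int main(void)
    {
        assert(deposit_2_at_31(0xff, 3) == (0xffULL | (3ULL << 31)));
        assert(deposit_2_at_31(~0ULL, 0) == ~(3ULL << 31));
        return 0;
    }
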
@@ -4096,12 +3994,11 @@ static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
set_cc_static(s);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
#endif
@@ -4370,21 +4267,19 @@ static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_stctg(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_stctl(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
@@ -4611,11 +4506,10 @@ static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+
gen_helper_stam(cpu_env, r1, o->in2, r3);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
return DISAS_NEXT;
}
@@ -4673,7 +4567,7 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
int r1 = get_field(s, r1);
int r3 = get_field(s, r3);
int size = s->insn->data;
- TCGv_i64 tsize = tcg_const_i64(size);
+ TCGv_i64 tsize = tcg_constant_i64(size);
while (1) {
if (size == 8) {
@@ -4688,7 +4582,6 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
r1 = (r1 + 1) & 15;
}
- tcg_temp_free_i64(tsize);
return DISAS_NEXT;
}
@@ -4697,8 +4590,8 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
int r1 = get_field(s, r1);
int r3 = get_field(s, r3);
TCGv_i64 t = tcg_temp_new_i64();
- TCGv_i64 t4 = tcg_const_i64(4);
- TCGv_i64 t32 = tcg_const_i64(32);
+ TCGv_i64 t4 = tcg_constant_i64(4);
+ TCGv_i64 t32 = tcg_constant_i64(32);
while (1) {
tcg_gen_shl_i64(t, regs[r1], t32);
@@ -4711,8 +4604,6 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
}
tcg_temp_free_i64(t);
- tcg_temp_free_i64(t4);
- tcg_temp_free_i64(t32);
return DISAS_NEXT;
}
@@ -4731,26 +4622,20 @@ static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_srst(cpu_env, r1, r2);
-
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_srstu(cpu_env, r1, r2);
-
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4808,10 +4693,9 @@ static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
* Borrow is {0, -1}, so add to subtract; replicate the
* borrow input to produce 128-bit -1 for the addition.
*/
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
- tcg_temp_free_i64(zero);
return DISAS_NEXT;
}
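
The borrow comment above is easier to follow with a concrete model. Below is a standalone sketch of the arithmetic the add2/sub2 pair generates, assuming a compiler with unsigned __int128 (gcc/clang); sub_with_borrow is an illustrative name, not a QEMU helper:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* borrow_in and borrow_out use the same {0, -1} encoding as cc_src. */
    static uint64_t sub_with_borrow(uint64_t a, uint64_t b,
                                    uint64_t borrow_in, uint64_t *borrow_out)
    {
        /* Replicate the borrow into both halves: a 128-bit 0 or -1. */
        unsigned __int128 acc = ((unsigned __int128)borrow_in << 64) | borrow_in;

        acc += a;   /* tcg_gen_add2_i64(out, cc_src, in1, zero, cc_src, cc_src) */
        acc -= b;   /* tcg_gen_sub2_i64(out, cc_src, out, cc_src, in2, zero)    */

        *borrow_out = (uint64_t)(acc >> 64);   /* again 0 or -1 */
        return (uint64_t)acc;
    }

    int main(void)
    {
        uint64_t borrow, r = sub_with_borrow(5, 7, 0, &borrow);

        printf("5 - 7 -> %#" PRIx64 ", borrow out %#" PRIx64 "\n", r, borrow);
        return 0;
    }
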
@@ -4823,13 +4707,11 @@ static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
update_psw_addr(s);
update_cc_op(s);
- t = tcg_const_i32(get_field(s, i1) & 0xff);
+ t = tcg_constant_i32(get_field(s, i1) & 0xff);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
- tcg_temp_free_i32(t);
- t = tcg_const_i32(s->ilen);
+ t = tcg_constant_i32(s->ilen);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
- tcg_temp_free_i32(t);
gen_exception(EXCP_SVC);
return DISAS_NORETURN;
@@ -4886,18 +4768,18 @@ static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
+ TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
+
gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
- tcg_temp_free_i32(l1);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_tr(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4915,27 +4797,27 @@ static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
- TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
+ TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
TCGv_i32 tst = tcg_temp_new_i32();
int m3 = get_field(s, m3);
@@ -4954,9 +4836,6 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
}
gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
- tcg_temp_free_i32(sizes);
tcg_temp_free_i32(tst);
set_cc_static(s);
return DISAS_NEXT;
@@ -4964,19 +4843,19 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
- TCGv_i32 t1 = tcg_const_i32(0xff);
+ TCGv_i32 t1 = tcg_constant_i32(0xff);
+
tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
tcg_gen_extract_i32(cc_op, t1, 7, 1);
- tcg_temp_free_i32(t1);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
- TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
+
gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
- tcg_temp_free_i32(l);
return DISAS_NEXT;
}
@@ -4990,9 +4869,8 @@ static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l1);
+ l = tcg_constant_i32(l1);
gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5007,9 +4885,8 @@ static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
gen_program_exception(s, PGM_SPECIFICATION);
return DISAS_NORETURN;
}
- l = tcg_const_i32(l1);
+ l = tcg_constant_i32(l1);
gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
- tcg_temp_free_i32(l);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5028,7 +4905,7 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
/* If the addresses are identical, this is a store/memset of zero. */
if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
- o->in2 = tcg_const_i64(0);
+ o->in2 = tcg_constant_i64(0);
l++;
while (l >= 8) {
@@ -5061,9 +4938,8 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
/* But in general we'll defer to a helper. */
o->in2 = get_address(s, 0, b2, d2);
- t32 = tcg_const_i32(l);
+ t32 = tcg_constant_i32(l);
gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
- tcg_temp_free_i32(t32);
set_cc_static(s);
return DISAS_NEXT;
}
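
A trivial reminder of why the identical-operand case in op_xc can be lowered to stores of zero: x ^ x is 0 for every byte, so XC of a buffer with itself behaves like memset(buf, 0, len). Minimal sketch:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buf[16];

        memset(buf, 0xaa, sizeof(buf));        /* some non-zero contents */
        for (size_t i = 0; i < sizeof(buf); i++) {
            buf[i] ^= buf[i];                  /* x ^ x == 0 for every byte */
        }
        for (size_t i = 0; i < sizeof(buf); i++) {
            assert(buf[i] == 0);
        }
        return 0;
    }
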
@@ -5128,46 +5004,39 @@ static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_clp(cpu_env, r2);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_pcilg(cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_pcistg(cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
- tcg_temp_free_i32(ar);
- tcg_temp_free_i32(r1);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -5180,38 +5049,31 @@ static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
gen_helper_rpcit(cpu_env, r1, r2);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r2);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
- TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
+ TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
- tcg_temp_free_i32(ar);
- tcg_temp_free_i32(r1);
- tcg_temp_free_i32(r3);
set_cc_static(s);
return DISAS_NEXT;
}
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
- TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+ TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
+ TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
- tcg_temp_free_i32(ar);
- tcg_temp_free_i32(r1);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -6378,16 +6240,15 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
if (unlikely(s->ex_value)) {
/* Drop the EX data now, so that it's clear on exception paths. */
- TCGv_i64 zero = tcg_const_i64(0);
- int i;
- tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
- tcg_temp_free_i64(zero);
+ tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
+ offsetof(CPUS390XState, ex_value));
/* Extract the values saved by EXECUTE. */
insn = s->ex_value & 0xffffffffffff0000ull;
ilen = s->ex_value & 0xf;
- /* register insn bytes with translator so plugins work */
- for (i = 0; i < ilen; i++) {
+
+ /* Register insn bytes with translator so plugins work. */
+ for (int i = 0; i < ilen; i++) {
uint8_t byte = extract64(insn, 56 - (i * 8), 8);
translator_fake_ldb(byte, pc + i);
}
@@ -6512,9 +6373,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
#ifndef CONFIG_USER_ONLY
if (s->base.tb->flags & FLAG_MASK_PER) {
- TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
+ TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
gen_helper_per_ifetch(cpu_env, addr);
- tcg_temp_free_i64(addr);
}
#endif
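
The recurring pattern in the hunks above is worth spelling out once: tcg_constant_*() hands back an interned, read-only value that the translator must neither write to nor free, while the old tcg_const_*() returned an ordinary temporary preloaded with the value, which callers had to release with tcg_temp_free_*(). A minimal before/after sketch in the style of the translator code; it assumes QEMU's TCG headers, uses the illustrative helper names emit_old/emit_new, and is not an excerpt from the patch:

    #include "qemu/osdep.h"
    #include "tcg/tcg-op.h"

    static void emit_old(TCGv_i32 dest)
    {
        TCGv_i32 t = tcg_const_i32(42);      /* writable temp, preloaded with 42 */

        tcg_gen_mov_i32(dest, t);
        tcg_temp_free_i32(t);                /* caller had to free it */
    }

    static void emit_new(TCGv_i32 dest)
    {
        TCGv_i32 t = tcg_constant_i32(42);   /* interned constant */

        tcg_gen_mov_i32(dest, t);            /* read-only: never written, never freed */
    }
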
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index d39ee81cd6..3fadc82e5c 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -319,12 +319,10 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
uint64_t b)
{
- TCGv_i64 bl = tcg_const_i64(b);
- TCGv_i64 bh = tcg_const_i64(0);
+ TCGv_i64 bl = tcg_constant_i64(b);
+ TCGv_i64 bh = tcg_constant_i64(0);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
- tcg_temp_free_i64(bl);
- tcg_temp_free_i64(bh);
}
static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o)
@@ -609,9 +607,8 @@ static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- tmp = tcg_const_i64((int16_t)get_field(s, i2));
+ tmp = tcg_constant_i64((int16_t)get_field(s, i2));
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1107,11 +1104,13 @@ static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
- TCGv_i64 tmp = tcg_const_i64(16);
+ TCGv_i64 tmp;
/* Probe write access before actually modifying memory */
- gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
+ gen_helper_probe_write_access(cpu_env, o->addr1,
+ tcg_constant_i64(16));
+ tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
@@ -1270,9 +1269,10 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
}
/* Probe write access before actually modifying memory */
- tmp = tcg_const_i64((v3 - v1 + 1) * 16);
- gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
+ gen_helper_probe_write_access(cpu_env, o->addr1,
+ tcg_constant_i64((v3 - v1 + 1) * 16));
+ tmp = tcg_temp_new_i64();
for (;; v1++) {
read_vec_element_i64(tmp, v1, 0, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
@@ -1359,7 +1359,7 @@ static DisasJumpType op_va(DisasContext *s, DisasOps *o)
static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
{
const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
- TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
+ TCGv_i64 msb_mask = tcg_constant_i64(dup_const(es, 1ull << msb_bit_nr));
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
TCGv_i64 t3 = tcg_temp_new_i64();
@@ -1416,7 +1416,7 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
{
TCGv_i64 th = tcg_temp_new_i64();
TCGv_i64 tl = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
@@ -1425,7 +1425,6 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
tcg_temp_free_i64(th);
tcg_temp_free_i64(tl);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
@@ -1455,15 +1454,14 @@ static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
TCGv_i64 tl = tcg_temp_new_i64();
- TCGv_i64 th = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
/* extract the carry only */
tcg_gen_extract_i64(tl, cl, 0, 1);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
- tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);
+ tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero);
tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
@@ -1484,7 +1482,7 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
{
TCGv_i64 tl = tcg_temp_new_i64();
TCGv_i64 th = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_andi_i64(tl, cl, 1);
tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
@@ -1495,7 +1493,6 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
tcg_temp_free_i64(tl);
tcg_temp_free_i64(th);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
@@ -1597,14 +1594,13 @@ static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
TCGv_i64 dh = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
tcg_temp_free_i64(dh);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
@@ -2440,7 +2436,7 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
{
TCGv_i64 th = tcg_temp_new_i64();
TCGv_i64 tl = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
tcg_gen_andi_i64(th, th, 1);
@@ -2452,7 +2448,6 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
tcg_temp_free_i64(th);
tcg_temp_free_i64(tl);
- tcg_temp_free_i64(zero);
}
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
@@ -2572,11 +2567,12 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- sumh = tcg_const_i64(0);
+ sumh = tcg_temp_new_i64();
suml = tcg_temp_new_i64();
- zero = tcg_const_i64(0);
+ zero = tcg_constant_i64(0);
tmpl = tcg_temp_new_i64();
+ tcg_gen_mov_i64(sumh, zero);
read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
for (idx = 0; idx <= max_idx; idx++) {
read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
@@ -2587,7 +2583,6 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
tcg_temp_free_i64(sumh);
tcg_temp_free_i64(suml);
- tcg_temp_free_i64(zero);
tcg_temp_free_i64(tmpl);
return DISAS_NEXT;
}
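
One subtlety in the vector hunks above (op_vst, op_vstm, op_vsumq): a tcg_constant_* value cannot be used as a destination, so an accumulator still needs a real temporary, seeded from the shared zero constant with a move. A sketch under the same assumptions as the previous one (QEMU TCG headers, illustrative helper name sum_three):

    #include "qemu/osdep.h"
    #include "tcg/tcg-op.h"

    static void sum_three(TCGv_i64 dst, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
    {
        TCGv_i64 zero = tcg_constant_i64(0); /* shared, read-only, never freed */
        TCGv_i64 acc = tcg_temp_new_i64();   /* writable accumulator */

        tcg_gen_mov_i64(acc, zero);          /* seed it, as op_vsumq now does */
        tcg_gen_add_i64(acc, acc, a);
        tcg_gen_add_i64(acc, acc, b);
        tcg_gen_add_i64(acc, acc, c);
        tcg_gen_mov_i64(dst, acc);
        tcg_temp_free_i64(acc);              /* plain temps are still freed here */
    }
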
diff --git a/tests/avocado/version.py b/tests/avocado/version.py
index ded7f039c1..dd775955eb 100644
--- a/tests/avocado/version.py
+++ b/tests/avocado/version.py
@@ -15,6 +15,7 @@ from avocado_qemu import QemuSystemTest
class Version(QemuSystemTest):
"""
:avocado: tags=quick
+ :avocado: tags=machine:none
"""
def test_qmp_human_info_version(self):
self.vm.add_args('-nodefaults')
diff --git a/tests/qemu-iotests/022 b/tests/qemu-iotests/022
index a116cfe255..d98d1ea90f 100755
--- a/tests/qemu-iotests/022
+++ b/tests/qemu-iotests/022
@@ -16,9 +16,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
-# USA
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# creator
diff --git a/tests/qtest/rtl8139-test.c b/tests/qtest/rtl8139-test.c
index 8fa3313cc3..1beb83805c 100644
--- a/tests/qtest/rtl8139-test.c
+++ b/tests/qtest/rtl8139-test.c
@@ -12,6 +12,8 @@
#include "libqos/pci-pc.h"
#include "qemu/timer.h"
+static int verbosity_level;
+
/* Tests only initialization so far. TODO: Replace with functional tests */
static void nop(void)
{
@@ -45,12 +47,16 @@ static QPCIDevice *get_device(void)
static unsigned __attribute__((unused)) in_##name(void) \
{ \
unsigned res = qpci_io_read##len(dev, dev_bar, (val)); \
- g_test_message("*%s -> %x", #name, res); \
+ if (verbosity_level >= 2) { \
+ g_test_message("*%s -> %x", #name, res); \
+ } \
return res; \
} \
static void out_##name(unsigned v) \
{ \
- g_test_message("%x -> *%s", v, #name); \
+ if (verbosity_level >= 2) { \
+ g_test_message("%x -> *%s", v, #name); \
+ } \
qpci_io_write##len(dev, dev_bar, (val), v); \
}
@@ -195,6 +201,11 @@ static void test_init(void)
int main(int argc, char **argv)
{
int ret;
+ char *v_env = getenv("V");
+
+ if (v_env) {
+ verbosity_level = atoi(v_env);
+ }
qtest_start("-device rtl8139");
diff --git a/tests/tcg/s390x/Makefile.softmmu-target b/tests/tcg/s390x/Makefile.softmmu-target
index 50c1b88065..725b6c598d 100644
--- a/tests/tcg/s390x/Makefile.softmmu-target
+++ b/tests/tcg/s390x/Makefile.softmmu-target
@@ -7,3 +7,5 @@ QEMU_OPTS=-action panic=exit-failure -kernel
-Wl,--build-id=none $< -o $@
TESTS += unaligned-lowcore
+TESTS += bal
+TESTS += sam
diff --git a/tests/tcg/s390x/bal.S b/tests/tcg/s390x/bal.S
new file mode 100644
index 0000000000..e54d8874ff
--- /dev/null
+++ b/tests/tcg/s390x/bal.S
@@ -0,0 +1,24 @@
+ .org 0x200 /* lowcore padding */
+ .globl _start
+_start:
+ lpswe start24_psw
+_start24:
+ lgrl %r0,initial_r0
+ lgrl %r1,expected_r0
+ bal %r0,0f
+0:
+ cgrjne %r0,%r1,1f
+ lpswe success_psw
+1:
+ lpswe failure_psw
+ .align 8
+start24_psw:
+ .quad 0x160000000000,_start24 /* 24-bit mode, cc = 1, pm = 6 */
+initial_r0:
+ .quad 0x1234567887654321
+expected_r0:
+ .quad 0x1234567896000000 + 0b /* ilc = 2, cc = 1, pm = 6 */
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
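
The expected_r0 constant packs the link information that BAL is expected to deposit in the low half of the register in 24-bit mode (ILC, condition code and program mask in bits 32-39, the return address in bits 40-63, bits 0-31 unchanged). A standalone arithmetic check of the 0x96 byte used above:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned ilc = 2, cc = 1, pm = 6;          /* as set up by start24_psw */
        uint8_t link_byte = (ilc << 6) | (cc << 4) | pm;

        assert(link_byte == 0x96);                 /* hence 0x1234567896000000 + 0b */
        return 0;
    }
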
diff --git a/tests/tcg/s390x/sam.S b/tests/tcg/s390x/sam.S
new file mode 100644
index 0000000000..4cab2dd200
--- /dev/null
+++ b/tests/tcg/s390x/sam.S
@@ -0,0 +1,67 @@
+/* DAT on, home-space mode, 64-bit mode */
+#define DAT_PSWM 0x400c00180000000
+#define VIRTUAL_BASE 0x123456789abcd000
+
+ .org 0x8e
+program_interruption_code:
+ .org 0x150
+program_old_psw:
+ .org 0x1d0 /* program new PSW */
+ .quad 0,pgm_handler
+ .org 0x200 /* lowcore padding */
+
+ .globl _start
+_start:
+ lctlg %c13,%c13,hasce
+ lpswe dat_psw
+start_dat:
+ sam24
+sam24_suppressed:
+ /* sam24 should fail */
+fail:
+ basr %r12,%r0
+ lpswe failure_psw-.(%r12)
+pgm_handler:
+ chhsi program_interruption_code,6 /* specification exception? */
+ jne fail
+ clc suppressed_psw(16),program_old_psw /* correct location? */
+ jne fail
+ lpswe success_psw
+
+ .align 8
+dat_psw:
+ .quad DAT_PSWM,VIRTUAL_BASE+start_dat
+suppressed_psw:
+ .quad DAT_PSWM,VIRTUAL_BASE+sam24_suppressed
+success_psw:
+ .quad 0x2000000000000,0xfff /* see is_special_wait_psw() */
+failure_psw:
+ .quad 0x2000000000000,0 /* disabled wait */
+hasce:
+ /* DT = 0b11 (region-first-table), TL = 3 (2k entries) */
+ .quad region_first_table + (3 << 2) + 3
+ .align 0x1000
+region_first_table:
+ .org region_first_table + ((VIRTUAL_BASE >> 53) & 0x7ff) * 8
+ /* TT = 0b11 (region-first-table), TL = 3 (2k entries) */
+ .quad region_second_table + (3 << 2) + 3
+ .org region_first_table + 0x800 * 8
+region_second_table:
+ .org region_second_table + ((VIRTUAL_BASE >> 42) & 0x7ff) * 8
+ /* TT = 0b10 (region-second-table), TL = 3 (2k entries) */
+ .quad region_third_table + (2 << 2) + 3
+ .org region_second_table + 0x800 * 8
+region_third_table:
+ .org region_third_table + ((VIRTUAL_BASE >> 31) & 0x7ff) * 8
+ /* TT = 0b01 (region-third-table), TL = 3 (2k entries) */
+ .quad segment_table + (1 << 2) + 3
+ .org region_third_table + 0x800 * 8
+segment_table:
+ .org segment_table + ((VIRTUAL_BASE >> 20) & 0x7ff) * 8
+ /* TT = 0b00 (segment-table) */
+ .quad page_table
+ .org segment_table + 0x800 * 8
+page_table:
+ .org page_table + ((VIRTUAL_BASE >> 12) & 0xff) * 8
+ .quad 0
+ .org page_table + 0x100 * 8
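
The .org directives above hand-place one entry per table level for VIRTUAL_BASE; each index is just a bit-field of the virtual address. A standalone sketch that prints the same index calculations:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VIRTUAL_BASE 0x123456789abcd000ULL

    int main(void)
    {
        uint64_t va = VIRTUAL_BASE;

        printf("region-first index : %#" PRIx64 "\n", (va >> 53) & 0x7ff);
        printf("region-second index: %#" PRIx64 "\n", (va >> 42) & 0x7ff);
        printf("region-third index : %#" PRIx64 "\n", (va >> 31) & 0x7ff);
        printf("segment index      : %#" PRIx64 "\n", (va >> 20) & 0x7ff);
        printf("page index         : %#" PRIx64 "\n", (va >> 12) & 0xff);
        return 0;
    }
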
diff --git a/tests/unit/rcutorture.c b/tests/unit/rcutorture.c
index 495a4e6f42..7662081683 100644
--- a/tests/unit/rcutorture.c
+++ b/tests/unit/rcutorture.c
@@ -50,8 +50,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
*/
diff --git a/tests/unit/test-rcu-list.c b/tests/unit/test-rcu-list.c
index 64b81ae058..9964171da4 100644
--- a/tests/unit/test-rcu-list.c
+++ b/tests/unit/test-rcu-list.c
@@ -14,8 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* Copyright (c) 2013 Mike D. Day, IBM Corporation.
*/
diff --git a/util/uri.c b/util/uri.c
index ff72c6005f..dcb3305236 100644
--- a/util/uri.c
+++ b/util/uri.c
@@ -43,8 +43,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library. If not, see <https://www.gnu.org/licenses/>.
*
* Authors:
* Richard W.M. Jones <rjones@redhat.com>