-rw-r--r--  .mailmap | 3
-rw-r--r--  MAINTAINERS | 81
-rw-r--r--  accel/kvm/kvm-all.c | 231
-rw-r--r--  accel/stubs/kvm-stub.c | 14
-rw-r--r--  backends/tpm/tpm_emulator.c | 10
-rw-r--r--  block/parallels.c | 9
-rw-r--r--  block/qcow.c | 6
-rw-r--r--  block/vdi.c | 6
-rw-r--r--  block/vhdx.c | 6
-rw-r--r--  block/vmdk.c | 6
-rw-r--r--  block/vpc.c | 6
-rw-r--r--  block/vvfat.c | 6
-rw-r--r--  chardev/msmouse.c | 2
-rw-r--r--  chardev/wctablet.c | 2
-rw-r--r--  configs/targets/sparc-softmmu.mak | 1
-rw-r--r--  configs/targets/sparc64-softmmu.mak | 1
-rw-r--r--  contrib/elf2dmp/addrspace.c | 7
-rw-r--r--  contrib/elf2dmp/main.c | 11
-rw-r--r--  contrib/elf2dmp/pdb.c | 32
-rw-r--r--  contrib/elf2dmp/qemu_elf.c | 7
-rw-r--r--  docs/about/deprecated.rst | 8
-rw-r--r--  docs/devel/index-internals.rst | 1
-rw-r--r--  docs/devel/s390-cpu-topology.rst | 170
-rw-r--r--  docs/interop/vhost-user.rst | 11
-rw-r--r--  docs/system/arm/emulation.rst | 1
-rw-r--r--  docs/system/s390x/cpu-topology.rst | 244
-rw-r--r--  docs/system/target-i386-desc.rst.inc | 8
-rw-r--r--  docs/system/target-s390x.rst | 1
-rw-r--r--  dump/dump.c | 4
-rw-r--r--  hw/9pfs/9p.c | 10
-rw-r--r--  hw/acpi/cxl.c | 69
-rw-r--r--  hw/acpi/pcihp.c | 5
-rw-r--r--  hw/arm/aspeed.c | 101
-rw-r--r--  hw/arm/aspeed_ast10x0.c | 53
-rw-r--r--  hw/arm/aspeed_ast2400.c (renamed from hw/arm/aspeed_soc.c) | 197
-rw-r--r--  hw/arm/aspeed_ast2600.c | 75
-rw-r--r--  hw/arm/aspeed_soc_common.c | 154
-rw-r--r--  hw/arm/boot.c | 95
-rw-r--r--  hw/arm/fby35.c | 27
-rw-r--r--  hw/arm/meson.build | 3
-rw-r--r--  hw/arm/sbsa-ref.c | 21
-rw-r--r--  hw/arm/smmuv3-internal.h | 38
-rw-r--r--  hw/arm/smmuv3.c | 8
-rw-r--r--  hw/arm/virt-acpi-build.c | 12
-rw-r--r--  hw/arm/virt.c | 29
-rw-r--r--  hw/block/vhost-user-blk.c | 10
-rw-r--r--  hw/char/escc.c | 2
-rw-r--r--  hw/core/cpu-sysemu.c | 6
-rw-r--r--  hw/core/machine-hmp-cmds.c | 6
-rw-r--r--  hw/core/machine-smp.c | 48
-rw-r--r--  hw/core/machine.c | 4
-rw-r--r--  hw/core/qdev-properties-system.c | 13
-rw-r--r--  hw/display/virtio-dmabuf.c | 12
-rw-r--r--  hw/display/virtio-gpu-base.c | 8
-rw-r--r--  hw/display/virtio-gpu.c | 2
-rw-r--r--  hw/display/xenfb.c | 6
-rw-r--r--  hw/dma/xilinx_axidma.c | 6
-rw-r--r--  hw/dma/xlnx-zdma.c | 7
-rw-r--r--  hw/dma/xlnx_csu_dma.c | 13
-rw-r--r--  hw/hppa/Kconfig | 1
-rw-r--r--  hw/hppa/hppa_hardware.h | 1
-rw-r--r--  hw/hppa/machine.c | 367
-rw-r--r--  hw/i386/Kconfig | 3
-rw-r--r--  hw/i386/acpi-build.c | 6
-rw-r--r--  hw/i386/amd_iommu.c | 5
-rw-r--r--  hw/i386/intel_iommu.c | 155
-rw-r--r--  hw/i386/intel_iommu_internal.h | 1
-rw-r--r--  hw/i386/kvm/clock.c | 4
-rw-r--r--  hw/i386/kvm/i8254.c | 38
-rw-r--r--  hw/i386/microvm.c | 2
-rw-r--r--  hw/i386/pc.c | 27
-rw-r--r--  hw/i386/pc_piix.c | 126
-rw-r--r--  hw/i386/pc_q35.c | 14
-rw-r--r--  hw/input/adb-kbd.c | 2
-rw-r--r--  hw/input/hid.c | 6
-rw-r--r--  hw/input/lasips2.c | 10
-rw-r--r--  hw/input/ps2.c | 4
-rw-r--r--  hw/input/virtio-input-hid.c | 8
-rw-r--r--  hw/intc/apic_common.c | 4
-rw-r--r--  hw/intc/arm_gic_kvm.c | 3
-rw-r--r--  hw/intc/arm_gicv3_its_common.c | 3
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c | 5
-rw-r--r--  hw/intc/arm_gicv3_kvm.c | 3
-rw-r--r--  hw/intc/spapr_xive.c | 12
-rw-r--r--  hw/isa/Kconfig | 8
-rw-r--r--  hw/isa/i82378.c | 5
-rw-r--r--  hw/isa/isa-bus.c | 11
-rw-r--r--  hw/isa/lpc_ich9.c | 9
-rw-r--r--  hw/isa/meson.build | 3
-rw-r--r--  hw/isa/piix.c (renamed from hw/isa/piix3.c) | 281
-rw-r--r--  hw/isa/piix4.c | 302
-rw-r--r--  hw/loongarch/virt.c | 2
-rw-r--r--  hw/mips/Kconfig | 2
-rw-r--r--  hw/mips/cps.c | 1
-rw-r--r--  hw/mips/fuloong2e.c | 1
-rw-r--r--  hw/mips/jazz.c | 6
-rw-r--r--  hw/mips/loongson3_virt.c | 1
-rw-r--r--  hw/mips/malta.c | 8
-rw-r--r--  hw/mips/mips_int.c | 1
-rw-r--r--  hw/mips/mipssim.c | 1
-rw-r--r--  hw/misc/allwinner-r40-dramc.c | 20
-rw-r--r--  hw/misc/bcm2835_property.c | 2
-rw-r--r--  hw/misc/ivshmem.c | 8
-rw-r--r--  hw/misc/mips_itu.c | 4
-rw-r--r--  hw/misc/pci-testdev.c | 3
-rw-r--r--  hw/net/cadence_gem.c | 7
-rw-r--r--  hw/net/tulip.c | 2
-rw-r--r--  hw/net/virtio-net.c | 6
-rw-r--r--  hw/nvram/xlnx-bbram.c | 8
-rw-r--r--  hw/nvram/xlnx-versal-efuse-ctrl.c | 8
-rw-r--r--  hw/nvram/xlnx-zynqmp-efuse.c | 8
-rw-r--r--  hw/pci-host/Kconfig | 4
-rw-r--r--  hw/pci-host/astro.c | 885
-rw-r--r--  hw/pci-host/bonito.c | 30
-rw-r--r--  hw/pci-host/meson.build | 1
-rw-r--r--  hw/pci-host/sh_pci.c | 57
-rw-r--r--  hw/pci-host/trace-events | 11
-rw-r--r--  hw/pci/pci.c | 25
-rw-r--r--  hw/ppc/pef.c | 2
-rw-r--r--  hw/ppc/pnv.c | 26
-rw-r--r--  hw/ppc/pnv_xscom.c | 5
-rw-r--r--  hw/ppc/ppc440_bamboo.c | 1
-rw-r--r--  hw/ppc/ppc440_uc.c | 42
-rw-r--r--  hw/ppc/spapr.c | 9
-rw-r--r--  hw/ppc/spapr_events.c | 6
-rw-r--r--  hw/ppc/spapr_rtas.c | 2
-rw-r--r--  hw/ppc/spapr_vio.c | 3
-rw-r--r--  hw/ppc/virtex_ml507.c | 1
-rw-r--r--  hw/rdma/vmw/pvrdma_cmd.c | 18
-rw-r--r--  hw/remote/proxy.c | 7
-rw-r--r--  hw/s390x/cpu-topology.c | 469
-rw-r--r--  hw/s390x/css-bridge.c | 7
-rw-r--r--  hw/s390x/meson.build | 1
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 38
-rw-r--r--  hw/s390x/sclp.c | 5
-rw-r--r--  hw/s390x/sclpquiesce.c | 8
-rw-r--r--  hw/s390x/virtio-ccw.c | 4
-rw-r--r--  hw/scsi/vhost-scsi-common.c | 47
-rw-r--r--  hw/scsi/vhost-scsi.c | 14
-rw-r--r--  hw/scsi/vhost-user-scsi.c | 250
-rw-r--r--  hw/scsi/virtio-scsi.c | 2
-rw-r--r--  hw/sd/sdhci.c | 15
-rw-r--r--  hw/sparc64/sun4u.c | 8
-rw-r--r--  hw/timer/i8254_common.c | 4
-rw-r--r--  hw/timer/npcm7xx_timer.c | 3
-rw-r--r--  hw/vfio/common.c | 10
-rw-r--r--  hw/vfio/migration.c | 22
-rw-r--r--  hw/virtio/vhost-backend.c | 6
-rw-r--r--  hw/virtio/vhost-shadow-virtqueue.c | 2
-rw-r--r--  hw/virtio/vhost-shadow-virtqueue.h | 1
-rw-r--r--  hw/virtio/vhost-user-gpio.c | 5
-rw-r--r--  hw/virtio/vhost-user.c | 246
-rw-r--r--  hw/virtio/vhost.c | 17
-rw-r--r--  hw/virtio/virtio-mmio.c | 4
-rw-r--r--  hw/virtio/virtio-pci.c | 23
-rw-r--r--  hw/virtio/virtio-pmem.c | 5
-rw-r--r--  hw/virtio/virtio.c | 4
-rw-r--r--  include/exec/memory.h | 2
-rw-r--r--  include/exec/target_long.h | 2
-rw-r--r--  include/hw/acpi/cxl.h | 1
-rw-r--r--  include/hw/acpi/pcihp.h | 2
-rw-r--r--  include/hw/arm/aspeed_soc.h | 35
-rw-r--r--  include/hw/arm/bsa.h | 35
-rw-r--r--  include/hw/arm/exynos4210.h | 2
-rw-r--r--  include/hw/arm/raspberrypi-fw-defs.h (renamed from include/hw/misc/raspberrypi-fw-defs.h) | 0
-rw-r--r--  include/hw/arm/virt.h | 12
-rw-r--r--  include/hw/audio/pcspk.h | 10
-rw-r--r--  include/hw/boards.h | 10
-rw-r--r--  include/hw/core/cpu.h | 4
-rw-r--r--  include/hw/core/sysemu-cpu-ops.h | 2
-rw-r--r--  include/hw/i386/pc.h | 2
-rw-r--r--  include/hw/mips/cpudevs.h | 14
-rw-r--r--  include/hw/misc/mips_itu.h | 4
-rw-r--r--  include/hw/nvram/xlnx-bbram.h | 2
-rw-r--r--  include/hw/pci-host/astro.h | 92
-rw-r--r--  include/hw/pci/pci.h | 9
-rw-r--r--  include/hw/pci/pci_ids.h | 2
-rw-r--r--  include/hw/ppc/pnv_xscom.h | 2
-rw-r--r--  include/hw/qdev-properties-system.h | 4
-rw-r--r--  include/hw/s390x/cpu-topology.h | 83
-rw-r--r--  include/hw/s390x/s390-virtio-ccw.h | 6
-rw-r--r--  include/hw/s390x/sclp.h | 4
-rw-r--r--  include/hw/southbridge/piix.h | 28
-rw-r--r--  include/hw/virtio/vhost-scsi-common.h | 2
-rw-r--r--  include/hw/virtio/vhost-user-scsi.h | 6
-rw-r--r--  include/hw/virtio/vhost-user.h | 6
-rw-r--r--  include/hw/virtio/vhost.h | 12
-rw-r--r--  include/hw/virtio/virtio-input.h | 2
-rw-r--r--  include/migration/blocker.h | 24
-rw-r--r--  include/migration/misc.h | 6
-rw-r--r--  include/sysemu/kvm.h | 37
-rw-r--r--  include/sysemu/kvm_int.h | 5
-rw-r--r--  include/sysemu/memory_mapping.h | 2
-rw-r--r--  include/tcg/tcg-op-common.h | 9
-rw-r--r--  include/tcg/tcg-op.h | 6
-rw-r--r--  include/tcg/tcg.h | 8
-rw-r--r--  include/ui/input.h | 2
-rw-r--r--  linux-user/elfload.c | 61
-rw-r--r--  linux-user/mips/cpu_loop.c | 4
-rw-r--r--  linux-user/mmap.c | 30
-rw-r--r--  linux-user/sh4/signal.c | 8
-rw-r--r--  linux-user/signal.c | 459
-rw-r--r--  linux-user/sparc/target_syscall.h | 6
-rw-r--r--  meson.build | 7
-rw-r--r--  migration/migration-hmp-cmds.c | 5
-rw-r--r--  migration/migration.c | 71
-rw-r--r--  migration/multifd.c | 3
-rw-r--r--  migration/ram-compress.c | 105
-rw-r--r--  migration/ram-compress.h | 5
-rw-r--r--  migration/ram.c | 50
-rw-r--r--  net/vhost-vdpa.c | 381
-rw-r--r--  pc-bios/hppa-firmware.img | bin 732376 -> 755480 bytes
-rw-r--r--  qapi/compat.json | 4
-rw-r--r--  qapi/machine-common.json | 21
-rw-r--r--  qapi/machine-target.json | 121
-rw-r--r--  qapi/machine.json | 85
-rw-r--r--  qapi/meson.build | 1
-rw-r--r--  qapi/qapi-schema.json | 1
-rw-r--r--  qemu-options.hx | 7
m---------  roms/seabios-hppa | 0
-rw-r--r--  scripts/qapi/gen.py | 2
-rw-r--r--  scripts/qapi/parser.py | 5
-rw-r--r--  scripts/qapi/schema.py | 5
-rw-r--r--  stubs/migr-blocker.c | 4
-rw-r--r--  subprojects/libvhost-user/libvhost-user.h | 3
-rw-r--r--  system/memory.c | 16
-rw-r--r--  system/memory_mapping.c | 17
-rw-r--r--  system/qtest.c | 16
-rw-r--r--  system/vl.c | 6
-rw-r--r--  target/arm/arm-powerctl.c | 53
-rw-r--r--  target/arm/common-semi-target.h | 4
-rw-r--r--  target/arm/cpu-qom.h | 2
-rw-r--r--  target/arm/cpu.c | 95
-rw-r--r--  target/arm/cpu.h | 22
-rw-r--r--  target/arm/helper.c | 19
-rw-r--r--  target/arm/kvm.c | 28
-rw-r--r--  target/arm/kvm64.c | 124
-rw-r--r--  target/arm/tcg/cpu32.c | 4
-rw-r--r--  target/arm/tcg/cpu64.c | 1
-rw-r--r--  target/arm/tcg/translate-a64.c | 37
-rw-r--r--  target/arm/tcg/translate.c | 37
-rw-r--r--  target/i386/arch_memory_mapping.c | 6
-rw-r--r--  target/i386/cpu.c | 4
-rw-r--r--  target/i386/cpu.h | 2
-rw-r--r--  target/i386/kvm/kvm.c | 233
-rw-r--r--  target/i386/kvm/kvm_i386.h | 2
-rw-r--r--  target/i386/nvmm/nvmm-all.c | 3
-rw-r--r--  target/i386/ops_sse.h | 128
-rw-r--r--  target/i386/sev.c | 2
-rw-r--r--  target/i386/tcg/decode-new.c.inc | 234
-rw-r--r--  target/i386/tcg/decode-new.h | 36
-rw-r--r--  target/i386/tcg/emit.c.inc | 62
-rw-r--r--  target/i386/tcg/ops_sse_header.h.inc | 14
-rw-r--r--  target/i386/tcg/translate.c | 91
-rw-r--r--  target/i386/whpx/whpx-all.c | 3
-rw-r--r--  target/m68k/translate.c | 23
-rw-r--r--  target/mips/cpu.h | 7
-rw-r--r--  target/mips/sysemu/cp0_timer.c | 1
-rw-r--r--  target/mips/tcg/sysemu/cp0_helper.c | 1
-rw-r--r--  target/mips/tcg/sysemu/tlb_helper.c | 1
-rw-r--r--  target/riscv/kvm/kvm-cpu.c | 2
-rw-r--r--  target/rx/translate.c | 11
-rw-r--r--  target/s390x/cpu-sysemu.c | 13
-rw-r--r--  target/s390x/cpu.c | 16
-rw-r--r--  target/s390x/cpu.h | 82
-rw-r--r--  target/s390x/cpu_models.c | 1
-rw-r--r--  target/s390x/kvm/kvm.c | 166
-rw-r--r--  target/s390x/kvm/kvm_s390x.h | 1
-rw-r--r--  target/s390x/kvm/meson.build | 3
-rw-r--r--  target/s390x/kvm/stsi-topology.c | 334
-rw-r--r--  target/sparc/cpu-feature.h.inc | 14
-rw-r--r--  target/sparc/cpu.c | 72
-rw-r--r--  target/sparc/cpu.h | 76
-rw-r--r--  target/sparc/fop_helper.c | 17
-rw-r--r--  target/sparc/helper.c | 8
-rw-r--r--  target/sparc/helper.h | 16
-rw-r--r--  target/sparc/insns.decode | 547
-rw-r--r--  target/sparc/ldst_helper.c | 17
-rw-r--r--  target/sparc/meson.build | 3
-rw-r--r--  target/sparc/translate.c | 6911
-rw-r--r--  target/sparc/vis_helper.c | 59
-rw-r--r--  target/tricore/translate.c | 20
-rw-r--r--  target/xtensa/translate.c | 12
-rw-r--r--  tcg/aarch64/tcg-target.c.inc | 175
-rw-r--r--  tcg/arm/tcg-target.c.inc | 209
-rw-r--r--  tcg/i386/tcg-target.c.inc | 200
-rw-r--r--  tcg/loongarch64/tcg-target.c.inc | 126
-rw-r--r--  tcg/mips/tcg-target.c.inc | 215
-rw-r--r--  tcg/optimize.c | 8
-rw-r--r--  tcg/ppc/tcg-target.c.inc | 533
-rw-r--r--  tcg/riscv/tcg-target.c.inc | 181
-rw-r--r--  tcg/s390x/tcg-target.c.inc | 157
-rw-r--r--  tcg/tcg-op-ldst.c | 28
-rw-r--r--  tcg/tcg-op.c | 50
-rw-r--r--  tcg/tcg.c | 13
-rw-r--r--  tests/avocado/s390_topology.py | 439
-rw-r--r--  tests/data/acpi/q35/DSDT.cxl | bin 9655 -> 9713 bytes
-rw-r--r--  tests/qtest/cdrom-test.c | 6
-rw-r--r--  tests/qtest/ipmi-bt-test.c | 2
-rw-r--r--  tests/qtest/libqtest.c | 98
-rw-r--r--  tests/qtest/libqtest.h | 32
-rw-r--r--  tests/qtest/migration-helpers.c | 52
-rw-r--r--  tests/qtest/migration-helpers.h | 4
-rw-r--r--  tests/qtest/migration-test.c | 54
-rw-r--r--  tests/qtest/npcm7xx_adc-test.c | 14
-rw-r--r--  tests/qtest/rtl8139-test.c | 12
-rw-r--r--  tests/qtest/virtio-scsi-test.c | 2
-rw-r--r--  tests/tcg/i386/test-avx.c | 19
-rwxr-xr-x  tests/tcg/i386/test-avx.py | 3
-rw-r--r--  tests/unit/test-aio.c | 4
-rw-r--r--  tests/unit/test-coroutine.c | 12
-rw-r--r--  tests/unit/test-throttle.c | 1
-rwxr-xr-x  tests/vm/freebsd | 5
-rw-r--r--  ui/input-legacy.c | 2
-rw-r--r--  ui/input.c | 4
-rw-r--r--  ui/spice-core.c | 3
-rw-r--r--  ui/vdagent.c | 7
-rw-r--r--  util/cutils.c | 18
318 files changed, 12305 insertions, 7886 deletions
diff --git a/.mailmap b/.mailmap
index d214959288..94f19a0ac9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -81,6 +81,9 @@ Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
+Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
+Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
+Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 9bd4fe378d..cd8d6b140f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -245,6 +245,7 @@ M: Richard Henderson <richard.henderson@linaro.org>
S: Maintained
F: target/hppa/
F: disas/hppa.c
+F: tests/tcg/hppa/
LoongArch TCG CPUs
M: Song Gao <gaosong@loongson.cn>
@@ -258,6 +259,7 @@ M: Laurent Vivier <laurent@vivier.eu>
S: Maintained
F: target/m68k/
F: disas/m68k.c
+F: tests/tcg/m68k/
MicroBlaze TCG CPUs
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@@ -284,7 +286,9 @@ R: Marek Vasut <marex@denx.de>
S: Orphan
F: target/nios2/
F: hw/nios2/
+F: hw/intc/nios2_vic.c
F: disas/nios2.c
+F: include/hw/intc/nios2_vic.h
F: configs/devices/nios2-softmmu/default.mak
F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh
F: tests/tcg/nios2/
@@ -295,6 +299,7 @@ S: Odd Fixes
F: docs/system/openrisc/cpu-features.rst
F: target/openrisc/
F: hw/openrisc/
+F: include/hw/openrisc/
F: tests/tcg/openrisc/
PowerPC TCG CPUs
@@ -307,6 +312,12 @@ F: target/ppc/
F: hw/ppc/ppc.c
F: hw/ppc/ppc_booke.c
F: include/hw/ppc/ppc.h
+F: hw/ppc/meson.build
+F: hw/ppc/trace*
+F: configs/devices/ppc*
+F: docs/system/ppc/embedded.rst
+F: docs/system/target-ppc.rst
+F: tests/tcg/ppc*/*
RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
@@ -325,6 +336,7 @@ F: hw/intc/riscv*
F: include/hw/riscv/
F: linux-user/host/riscv32/
F: linux-user/host/riscv64/
+F: tests/tcg/riscv64/
RISC-V XThead* extensions
M: Christoph Muellner <christoph.muellner@vrull.eu>
@@ -366,6 +378,7 @@ F: target/sh4/
F: hw/sh4/
F: disas/sh4.c
F: include/hw/sh4/
+F: tests/tcg/sh4/
SPARC TCG CPUs
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
@@ -376,6 +389,7 @@ F: hw/sparc/
F: hw/sparc64/
F: include/hw/sparc/sparc64.h
F: disas/sparc.c
+F: tests/tcg/sparc64/
X86 TCG CPUs
M: Paolo Bonzini <pbonzini@redhat.com>
@@ -885,7 +899,7 @@ S: Odd Fixes
F: hw/arm/raspi.c
F: hw/arm/raspi_platform.h
F: hw/*/bcm283*
-F: include/hw/arm/raspi*
+F: include/hw/arm/rasp*
F: include/hw/*/bcm283*
F: docs/system/arm/raspi.rst
@@ -1117,7 +1131,7 @@ F: docs/system/arm/emcraft-sf2.rst
ASPEED BMCs
M: Cédric Le Goater <clg@kaod.org>
M: Peter Maydell <peter.maydell@linaro.org>
-R: Andrew Jeffery <andrew@aj.id.au>
+R: Andrew Jeffery <andrew@codeconstruct.com.au>
R: Joel Stanley <joel@jms.id.au>
L: qemu-arm@nongnu.org
S: Maintained
@@ -1173,19 +1187,24 @@ F: hw/*/etraxfs_*.c
HP-PARISC Machines
------------------
-HP B160L
+HP B160L, HP C3700
M: Richard Henderson <richard.henderson@linaro.org>
R: Helge Deller <deller@gmx.de>
S: Odd Fixes
F: configs/devices/hppa-softmmu/default.mak
F: hw/hppa/
+F: hw/input/lasips2.c
F: hw/net/*i82596*
F: hw/misc/lasi.c
+F: hw/pci-host/astro.c
F: hw/pci-host/dino.c
+F: include/hw/input/lasips2.h
F: include/hw/misc/lasi.h
F: include/hw/net/lasi_82596.h
+F: include/hw/pci-host/astro.h
F: include/hw/pci-host/dino.h
F: pc-bios/hppa-firmware.img
+F: roms/seabios-hppa/
LoongArch Machines
------------------
@@ -1302,7 +1321,7 @@ Malta
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Aurelien Jarno <aurelien@aurel32.net>
S: Odd Fixes
-F: hw/isa/piix4.c
+F: hw/isa/piix.c
F: hw/acpi/piix4.c
F: hw/mips/malta.c
F: hw/pci-host/gt64120.c
@@ -1322,10 +1341,7 @@ M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Jiaxun Yang <jiaxun.yang@flygoat.com>
S: Odd Fixes
F: hw/mips/fuloong2e.c
-F: hw/isa/vt82c686.c
F: hw/pci-host/bonito.c
-F: hw/usb/vt82c686-uhci-pci.c
-F: include/hw/isa/vt82c686.h
F: include/hw/pci-host/bonito.h
F: tests/avocado/machine_mips_fuloong2e.py
@@ -1337,6 +1353,7 @@ F: hw/intc/loongson_liointc.c
F: hw/mips/loongson3_bootp.c
F: hw/mips/loongson3_bootp.h
F: hw/mips/loongson3_virt.c
+F: include/hw/intc/loongson_liointc.h
F: tests/avocado/machine_mips_loongson3v.py
Boston
@@ -1354,6 +1371,7 @@ or1k-sim
M: Jia Liu <proljc@gmail.com>
S: Maintained
F: docs/system/openrisc/or1k-sim.rst
+F: hw/intc/ompic.c
F: hw/openrisc/openrisc_sim.c
PowerPC Machines
@@ -1361,7 +1379,8 @@ PowerPC Machines
405 (ref405ep)
L: qemu-ppc@nongnu.org
S: Orphan
-F: hw/ppc/ppc405_boards.c
+F: hw/ppc/ppc405*
+F: tests/avocado/ppc_405.py
Bamboo
L: qemu-ppc@nongnu.org
@@ -1373,6 +1392,7 @@ e500
L: qemu-ppc@nongnu.org
S: Orphan
F: hw/ppc/e500*
+F: hw/ppc/ppce500_spin.c
F: hw/gpio/mpc8xxx.c
F: hw/i2c/mpc_i2c.c
F: hw/net/fsl_etsec/
@@ -1380,8 +1400,9 @@ F: hw/pci-host/ppce500.c
F: include/hw/ppc/ppc_e500.h
F: include/hw/pci-host/ppce500.h
F: pc-bios/u-boot.e500
-F: hw/intc/openpic_kvm.h
+F: hw/intc/openpic_kvm.c
F: include/hw/ppc/openpic_kvm.h
+F: docs/system/ppc/ppce500.rst
mpc8544ds
L: qemu-ppc@nongnu.org
@@ -1401,6 +1422,7 @@ F: hw/pci-bridge/dec.[hc]
F: hw/misc/macio/
F: hw/misc/mos6522.c
F: hw/nvram/mac_nvram.c
+F: hw/ppc/fw_cfg.c
F: hw/input/adb*
F: include/hw/misc/macio/
F: include/hw/misc/mos6522.h
@@ -1454,6 +1476,10 @@ F: hw/*/spapr*
F: include/hw/*/spapr*
F: hw/*/xics*
F: include/hw/*/xics*
+F: include/hw/ppc/fdt.h
+F: hw/ppc/fdt.c
+F: include/hw/ppc/pef.h
+F: hw/ppc/pef.c
F: pc-bios/slof.bin
F: docs/system/ppc/pseries.rst
F: docs/specs/ppc-spapr-*
@@ -1491,6 +1517,7 @@ M: BALATON Zoltan <balaton@eik.bme.hu>
L: qemu-ppc@nongnu.org
S: Maintained
F: hw/ppc/sam460ex.c
+F: hw/ppc/ppc440_uc.c
F: hw/ppc/ppc440_pcix.c
F: hw/display/sm501*
F: hw/ide/sii3112.c
@@ -1710,6 +1737,16 @@ F: hw/s390x/event-facility.c
F: hw/s390x/sclp*.c
L: qemu-s390x@nongnu.org
+S390 CPU topology
+M: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
+S: Supported
+F: include/hw/s390x/cpu-topology.h
+F: hw/s390x/cpu-topology.c
+F: target/s390x/kvm/stsi-topology.c
+F: docs/devel/s390-cpu-topology.rst
+F: docs/system/s390x/cpu-topology.rst
+F: tests/avocado/s390_topology.py
+
X86 Machines
------------
PC
@@ -1724,7 +1761,7 @@ F: hw/pci-host/pam.c
F: include/hw/pci-host/i440fx.h
F: include/hw/pci-host/q35.h
F: include/hw/pci-host/pam.h
-F: hw/isa/piix3.c
+F: hw/isa/piix.c
F: hw/isa/lpc_ich9.c
F: hw/i2c/smbus_ich9.c
F: hw/acpi/piix4.c
@@ -1764,6 +1801,7 @@ F: include/hw/dma/i8257.h
F: include/hw/i2c/pm_smbus.h
F: include/hw/input/i8042.h
F: include/hw/intc/ioapic*
+F: include/hw/intc/i8259.h
F: include/hw/isa/i8259_internal.h
F: include/hw/isa/superio.h
F: include/hw/timer/hpet.h
@@ -1793,6 +1831,7 @@ F: hw/core/null-machine.c
F: hw/core/numa.c
F: hw/cpu/cluster.c
F: qapi/machine.json
+F: qapi/machine-common.json
F: qapi/machine-target.json
F: include/hw/boards.h
F: include/hw/core/cpu.h
@@ -1978,7 +2017,9 @@ F: docs/specs/acpi_hest_ghes.rst
ppc4xx
L: qemu-ppc@nongnu.org
S: Orphan
-F: hw/ppc/ppc4*.c
+F: hw/ppc/ppc4xx*.c
+F: hw/ppc/ppc440_uc.c
+F: hw/ppc/ppc440.h
F: hw/i2c/ppc4xx_i2c.c
F: include/hw/ppc/ppc4xx.h
F: include/hw/i2c/ppc4xx_i2c.h
@@ -2478,9 +2519,18 @@ PIIX4 South Bridge (i82371AB)
M: Hervé Poussineau <hpoussin@reactos.org>
M: Philippe Mathieu-Daudé <philmd@linaro.org>
S: Maintained
-F: hw/isa/piix4.c
+F: hw/isa/piix.c
F: include/hw/southbridge/piix.h
+VIA South Bridges (VT82C686B, VT8231)
+M: BALATON Zoltan <balaton@eik.bme.hu>
+M: Philippe Mathieu-Daudé <philmd@linaro.org>
+R: Jiaxun Yang <jiaxun.yang@flygoat.com>
+S: Maintained
+F: hw/isa/vt82c686.c
+F: hw/usb/vt82c686-uhci-pci.c
+F: include/hw/isa/vt82c686.h
+
Firmware configuration (fw_cfg)
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Gerd Hoffmann <kraxel@redhat.com>
@@ -2574,7 +2624,7 @@ M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
S: Supported
F: hw/s390x/storage-keys.h
-F: hw/390x/s390-skeys*.c
+F: hw/s390x/s390-skeys*.c
L: qemu-s390x@nongnu.org
S390 storage attribute device
@@ -2582,7 +2632,7 @@ M: Halil Pasic <pasic@linux.ibm.com>
M: Christian Borntraeger <borntraeger@linux.ibm.com>
S: Supported
F: hw/s390x/storage-attributes.h
-F: hw/s390/s390-stattrib*.c
+F: hw/s390x/s390-stattrib*.c
L: qemu-s390x@nongnu.org
S390 floating interrupt controller
@@ -2927,7 +2977,7 @@ F: include/qemu/main-loop.h
F: include/sysemu/runstate.h
F: include/sysemu/runstate-action.h
F: util/main-loop.c
-F: util/qemu-timer.c
+F: util/qemu-timer*.c
F: system/vl.c
F: system/main.c
F: system/cpus.c
@@ -3912,6 +3962,7 @@ M: Jason Wang <jasowang@redhat.com>
R: Andrew Melnychenko <andrew@daynix.com>
R: Yuri Benditovich <yuri.benditovich@daynix.com>
S: Maintained
+F: docs/devel/ebpf_rss.rst
F: ebpf/*
F: tools/ebpf/*
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 3f7eafe08c..e39a810a4e 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -90,8 +90,6 @@ bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
-bool kvm_eventfds_allowed;
-bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
@@ -99,8 +97,6 @@ bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
-bool kvm_direct_msi_allowed;
-bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
bool kvm_has_guest_debug;
static int kvm_sstep_flags;
@@ -111,6 +107,9 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
KVM_CAP_INFO(USER_MEMORY),
KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
+ KVM_CAP_INFO(INTERNAL_ERROR_DATA),
+ KVM_CAP_INFO(IOEVENTFD),
+ KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
KVM_CAP_LAST_INFO
};
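
The table above is walked once at kvm_init() time and QEMU now refuses to
start when any entry is missing, which is what lets later hunks drop the
runtime kvm_eventfds_allowed/kvm_ioeventfd_any_length_allowed checks. A
minimal sketch of that pattern, assuming the KVMCapabilityInfo layout used
elsewhere in this file (the real walker is kvm_check_extension_list(); this
rendering is abridged):

    static const KVMCapabilityInfo *
    required_cap_missing(KVMState *s, const KVMCapabilityInfo *list)
    {
        while (list->name) {
            /* KVM_CHECK_EXTENSION reports <= 0 for unsupported caps */
            if (kvm_check_extension(s, list->value) <= 0) {
                return list;    /* first capability the kernel lacks */
            }
            list++;
        }
        return NULL;            /* all present: callers may rely on them */
    }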
@@ -1106,13 +1105,6 @@ static void kvm_coalesce_pio_del(MemoryListener *listener,
}
}
-static MemoryListener kvm_coalesced_pio_listener = {
- .name = "kvm-coalesced-pio",
- .coalesced_io_add = kvm_coalesce_pio_add,
- .coalesced_io_del = kvm_coalesce_pio_del,
- .priority = MEMORY_LISTENER_PRIORITY_MIN,
-};
-
int kvm_check_extension(KVMState *s, unsigned int extension)
{
int ret;
@@ -1254,43 +1246,6 @@ static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
}
-static int kvm_check_many_ioeventfds(void)
-{
- /* Userspace can use ioeventfd for io notification. This requires a host
- * that supports eventfd(2) and an I/O thread; since eventfd does not
- * support SIGIO it cannot interrupt the vcpu.
- *
- * Older kernels have a 6 device limit on the KVM io bus. Find out so we
- * can avoid creating too many ioeventfds.
- */
-#if defined(CONFIG_EVENTFD)
- int ioeventfds[7];
- int i, ret = 0;
- for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
- ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
- if (ioeventfds[i] < 0) {
- break;
- }
- ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
- if (ret < 0) {
- close(ioeventfds[i]);
- break;
- }
- }
-
- /* Decide whether many devices are supported or not */
- ret = i == ARRAY_SIZE(ioeventfds);
-
- while (i-- > 0) {
- kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
- close(ioeventfds[i]);
- }
- return ret;
-#else
- return 0;
-#endif
-}
-
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
@@ -1806,6 +1761,8 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
static MemoryListener kvm_io_listener = {
.name = "kvm-io",
+ .coalesced_io_add = kvm_coalesce_pio_add,
+ .coalesced_io_del = kvm_coalesce_pio_del,
.eventfd_add = kvm_io_ioeventfd_add,
.eventfd_del = kvm_io_ioeventfd_del,
.priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
@@ -1847,7 +1804,7 @@ static void clear_gsi(KVMState *s, unsigned int gsi)
void kvm_init_irq_routing(KVMState *s)
{
- int gsi_count, i;
+ int gsi_count;
gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
if (gsi_count > 0) {
@@ -1859,12 +1816,6 @@ void kvm_init_irq_routing(KVMState *s)
s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
s->nr_allocated_irq_routes = 0;
- if (!kvm_direct_msi_allowed) {
- for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
- QTAILQ_INIT(&s->msi_hashtab[i]);
- }
- }
-
kvm_arch_init_irq_routing(s);
}
@@ -1984,41 +1935,10 @@ void kvm_irqchip_change_notify(void)
notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}
-static unsigned int kvm_hash_msi(uint32_t data)
-{
- /* This is optimized for IA32 MSI layout. However, no other arch shall
- * repeat the mistake of not providing a direct MSI injection API. */
- return data & 0xff;
-}
-
-static void kvm_flush_dynamic_msi_routes(KVMState *s)
-{
- KVMMSIRoute *route, *next;
- unsigned int hash;
-
- for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
- QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
- kvm_irqchip_release_virq(s, route->kroute.gsi);
- QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
- g_free(route);
- }
- }
-}
-
static int kvm_irqchip_get_virq(KVMState *s)
{
int next_virq;
- /*
- * PIC and IOAPIC share the first 16 GSI numbers, thus the available
- * GSI numbers are more than the number of IRQ route. Allocating a GSI
- * number can succeed even though a new route entry cannot be added.
- * When this happens, flush dynamic MSI entries to free IRQ route entries.
- */
- if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
- kvm_flush_dynamic_msi_routes(s);
- }
-
/* Return the lowest unused GSI in the bitmap */
next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
if (next_virq >= s->gsi_count) {
@@ -2028,63 +1948,17 @@ static int kvm_irqchip_get_virq(KVMState *s)
}
}
-static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
-{
- unsigned int hash = kvm_hash_msi(msg.data);
- KVMMSIRoute *route;
-
- QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
- if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
- route->kroute.u.msi.address_hi == (msg.address >> 32) &&
- route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
- return route;
- }
- }
- return NULL;
-}
-
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
struct kvm_msi msi;
- KVMMSIRoute *route;
-
- if (kvm_direct_msi_allowed) {
- msi.address_lo = (uint32_t)msg.address;
- msi.address_hi = msg.address >> 32;
- msi.data = le32_to_cpu(msg.data);
- msi.flags = 0;
- memset(msi.pad, 0, sizeof(msi.pad));
- return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
- }
-
- route = kvm_lookup_msi_route(s, msg);
- if (!route) {
- int virq;
-
- virq = kvm_irqchip_get_virq(s);
- if (virq < 0) {
- return virq;
- }
+ msi.address_lo = (uint32_t)msg.address;
+ msi.address_hi = msg.address >> 32;
+ msi.data = le32_to_cpu(msg.data);
+ msi.flags = 0;
+ memset(msi.pad, 0, sizeof(msi.pad));
- route = g_new0(KVMMSIRoute, 1);
- route->kroute.gsi = virq;
- route->kroute.type = KVM_IRQ_ROUTING_MSI;
- route->kroute.flags = 0;
- route->kroute.u.msi.address_lo = (uint32_t)msg.address;
- route->kroute.u.msi.address_hi = msg.address >> 32;
- route->kroute.u.msi.data = le32_to_cpu(msg.data);
-
- kvm_add_routing_entry(s, &route->kroute);
- kvm_irqchip_commit_routes(s);
-
- QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
- entry);
- }
-
- assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
-
- return kvm_set_irq(s, route->kroute.gsi, 1);
+ return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
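
With the userspace MSI route cache removed above, kvm_irqchip_send_msi() is
now a thin wrapper around the KVM_SIGNAL_MSI ioctl; the series assumes that
capability is present whenever this path is reached. A caller-side sketch
(the address/data values are illustrative only):

    MSIMessage msg = {
        .address = 0xfee00000ULL,   /* example x86 MSI doorbell address */
        .data    = 0x4041,          /* example vector/delivery encoding */
    };
    if (kvm_irqchip_send_msi(kvm_state, msg) < 0) {
        /* the kernel rejected the message; no userspace fallback remains */
    }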
@@ -2211,10 +2085,6 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
}
}
- if (!kvm_irqfds_enabled()) {
- return -ENOSYS;
- }
-
return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
@@ -2375,6 +2245,11 @@ static void kvm_irqchip_create(KVMState *s)
return;
}
+ if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) {
+ fprintf(stderr, "kvm: irqfd not implemented\n");
+ exit(1);
+ }
+
/* First probe and see if there's a arch-specific hook to create the
* in-kernel irqchip for us */
ret = kvm_arch_irqchip_create(s);
@@ -2649,22 +2524,8 @@ static int kvm_init(MachineState *ms)
#ifdef KVM_CAP_VCPU_EVENTS
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
-
- s->robust_singlestep =
- kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
-
-#ifdef KVM_CAP_DEBUGREGS
- s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
-#endif
-
s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
-#ifdef KVM_CAP_IRQ_ROUTING
- kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
-#endif
-
- s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
-
s->irq_set_ioctl = KVM_IRQ_LINE;
if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
@@ -2673,21 +2534,12 @@ static int kvm_init(MachineState *ms)
kvm_readonly_mem_allowed =
(kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
- kvm_eventfds_allowed =
- (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
-
- kvm_irqfds_allowed =
- (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
-
kvm_resamplefds_allowed =
(kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
kvm_vm_attributes_allowed =
(kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
- kvm_ioeventfd_any_length_allowed =
- (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
-
#ifdef KVM_CAP_SET_GUEST_DEBUG
kvm_has_guest_debug =
(kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
@@ -2724,24 +2576,16 @@ static int kvm_init(MachineState *ms)
kvm_irqchip_create(s);
}
- if (kvm_eventfds_allowed) {
- s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
- s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
- }
+ s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
+ s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
kvm_memory_listener_register(s, &s->memory_listener,
&address_space_memory, 0, "kvm-memory");
- if (kvm_eventfds_allowed) {
- memory_listener_register(&kvm_io_listener,
- &address_space_io);
- }
- memory_listener_register(&kvm_coalesced_pio_listener,
+ memory_listener_register(&kvm_io_listener,
&address_space_io);
- s->many_ioeventfds = kvm_check_many_ioeventfds();
-
s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
if (!s->sync_mmu) {
ret = ram_block_discard_disable(true);
@@ -2794,16 +2638,14 @@ static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direc
static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
+ int i;
+
fprintf(stderr, "KVM internal error. Suberror: %d\n",
run->internal.suberror);
- if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
- int i;
-
- for (i = 0; i < run->internal.ndata; ++i) {
- fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
- i, (uint64_t)run->internal.data[i]);
- }
+ for (i = 0; i < run->internal.ndata; ++i) {
+ fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
+ i, (uint64_t)run->internal.data[i]);
}
if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
fprintf(stderr, "emulation failure\n");
@@ -3297,29 +3139,11 @@ int kvm_has_vcpu_events(void)
return kvm_state->vcpu_events;
}
-int kvm_has_robust_singlestep(void)
-{
- return kvm_state->robust_singlestep;
-}
-
-int kvm_has_debugregs(void)
-{
- return kvm_state->debugregs;
-}
-
int kvm_max_nested_state_length(void)
{
return kvm_state->max_nested_state_len;
}
-int kvm_has_many_ioeventfds(void)
-{
- if (!kvm_enabled()) {
- return 0;
- }
- return kvm_state->many_ioeventfds;
-}
-
int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
@@ -3329,11 +3153,6 @@ int kvm_has_gsi_routing(void)
#endif
}
-int kvm_has_intx_set_mask(void)
-{
- return kvm_state->intx_set_mask;
-}
-
bool kvm_arm_supports_user_irq(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
index 51f522e52e..1b37d9a302 100644
--- a/accel/stubs/kvm-stub.c
+++ b/accel/stubs/kvm-stub.c
@@ -17,17 +17,13 @@
KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
-bool kvm_eventfds_allowed;
-bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
-bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
-bool kvm_direct_msi_allowed;
void kvm_flush_coalesced_mmio_buffer(void)
{
@@ -42,11 +38,6 @@ bool kvm_has_sync_mmu(void)
return false;
}
-int kvm_has_many_ioeventfds(void)
-{
- return 0;
-}
-
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
return 1;
@@ -92,11 +83,6 @@ void kvm_irqchip_change_notify(void)
{
}
-int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
-{
- return -ENOSYS;
-}
-
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
EventNotifier *rn, int virq)
{
diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c
index 402a2d6312..bf1a90f5d7 100644
--- a/backends/tpm/tpm_emulator.c
+++ b/backends/tpm/tpm_emulator.c
@@ -534,11 +534,8 @@ static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
error_setg(&tpm_emu->migration_blocker,
"Migration disabled: TPM emulator does not support "
"migration");
- if (migrate_add_blocker(tpm_emu->migration_blocker, &err) < 0) {
+ if (migrate_add_blocker(&tpm_emu->migration_blocker, &err) < 0) {
error_report_err(err);
- error_free(tpm_emu->migration_blocker);
- tpm_emu->migration_blocker = NULL;
-
return -1;
}
}
@@ -1016,10 +1013,7 @@ static void tpm_emulator_inst_finalize(Object *obj)
qapi_free_TPMEmulatorOptions(tpm_emu->options);
- if (tpm_emu->migration_blocker) {
- migrate_del_blocker(tpm_emu->migration_blocker);
- error_free(tpm_emu->migration_blocker);
- }
+ migrate_del_blocker(&tpm_emu->migration_blocker);
tpm_sized_buffer_reset(&state_blobs->volatil);
tpm_sized_buffer_reset(&state_blobs->permanent);
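
This hunk, and the block-driver hunks below, all move to the reworked
migration-blocker API: migrate_add_blocker() and migrate_del_blocker() now
take the address of the Error pointer and own its lifetime. A sketch of the
resulting caller pattern (MyDev and its functions are hypothetical, not part
of this patch):

    typedef struct MyDev {
        Error *migration_blocker;
    } MyDev;

    static int mydev_realize(MyDev *d, Error **errp)
    {
        error_setg(&d->migration_blocker, "mydev does not support migration");
        if (migrate_add_blocker(&d->migration_blocker, errp) < 0) {
            /* on failure the blocker is already freed and set to NULL */
            return -1;
        }
        return 0;
    }

    static void mydev_cleanup(MyDev *d)
    {
        /* frees and clears the blocker; a no-op if it was never added */
        migrate_del_blocker(&d->migration_blocker);
    }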
diff --git a/block/parallels.c b/block/parallels.c
index 6b46623241..1d695ce7fb 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -1369,9 +1369,8 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_setg(errp, "Migration blocker error");
goto fail;
}
qemu_co_mutex_init(&s->lock);
@@ -1406,7 +1405,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
ret = bdrv_check(bs, &res, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not repair corrupted image");
- migrate_del_blocker(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
goto fail;
}
}
@@ -1423,7 +1422,6 @@ fail:
*/
parallels_free_used_bitmap(bs);
- error_free(s->migration_blocker);
g_free(s->bat_dirty_bmap);
qemu_vfree(s->header);
return ret;
@@ -1448,8 +1446,7 @@ static void parallels_close(BlockDriverState *bs)
g_free(s->bat_dirty_bmap);
qemu_vfree(s->header);
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
}
static bool parallels_is_support_dirty_bitmaps(BlockDriverState *bs)
diff --git a/block/qcow.c b/block/qcow.c
index 38a16253b8..fdd4c83948 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -307,9 +307,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_free(s->migration_blocker);
goto fail;
}
@@ -802,8 +801,7 @@ static void qcow_close(BlockDriverState *bs)
g_free(s->cluster_cache);
g_free(s->cluster_data);
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
}
static int coroutine_fn GRAPH_UNLOCKED
diff --git a/block/vdi.c b/block/vdi.c
index 3ed43b6f35..fd7e365383 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -498,9 +498,8 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_free(s->migration_blocker);
goto fail_free_bmap;
}
@@ -988,8 +987,7 @@ static void vdi_close(BlockDriverState *bs)
qemu_vfree(s->bmap);
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
}
static int vdi_has_zero_init(BlockDriverState *bs)
diff --git a/block/vhdx.c b/block/vhdx.c
index 73cb214fb4..e37f8c0926 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -985,8 +985,7 @@ static void vhdx_close(BlockDriverState *bs)
s->bat = NULL;
qemu_vfree(s->parent_entries);
s->parent_entries = NULL;
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
qemu_vfree(s->log.hdr);
s->log.hdr = NULL;
vhdx_region_unregister_all(s);
@@ -1097,9 +1096,8 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vhdx format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_free(s->migration_blocker);
goto fail;
}
diff --git a/block/vmdk.c b/block/vmdk.c
index 8a3b152798..1335d39e16 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -1386,9 +1386,8 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vmdk format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_free(s->migration_blocker);
goto fail;
}
@@ -2867,8 +2866,7 @@ static void vmdk_close(BlockDriverState *bs)
vmdk_free_extents(bs);
g_free(s->create_type);
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
}
static int64_t coroutine_fn GRAPH_RDLOCK
diff --git a/block/vpc.c b/block/vpc.c
index 945847fe4a..c30cf8689a 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -452,9 +452,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_free(s->migration_blocker);
goto fail;
}
@@ -1190,8 +1189,7 @@ static void vpc_close(BlockDriverState *bs)
g_free(s->pageentry_u8);
#endif
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
}
static QemuOptsList vpc_create_opts = {
diff --git a/block/vvfat.c b/block/vvfat.c
index b0415798c0..266e036dcd 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -1268,9 +1268,8 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
"The vvfat (rw) format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- ret = migrate_add_blocker(s->migration_blocker, errp);
+ ret = migrate_add_blocker(&s->migration_blocker, errp);
if (ret < 0) {
- error_free(s->migration_blocker);
goto fail;
}
}
@@ -3239,8 +3238,7 @@ static void vvfat_close(BlockDriverState *bs)
g_free(s->cluster_buffer);
if (s->qcow) {
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
+ migrate_del_blocker(&s->migration_blocker);
}
}
diff --git a/chardev/msmouse.c b/chardev/msmouse.c
index ab8fe981d6..a774c397b4 100644
--- a/chardev/msmouse.c
+++ b/chardev/msmouse.c
@@ -171,7 +171,7 @@ static int msmouse_chr_write(struct Chardev *s, const uint8_t *buf, int len)
return len;
}
-static QemuInputHandler msmouse_handler = {
+static const QemuInputHandler msmouse_handler = {
.name = "QEMU Microsoft Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
.event = msmouse_input_event,
diff --git a/chardev/wctablet.c b/chardev/wctablet.c
index 43bdf6b608..f4008bf35b 100644
--- a/chardev/wctablet.c
+++ b/chardev/wctablet.c
@@ -178,7 +178,7 @@ static void wctablet_input_sync(DeviceState *dev)
}
}
-static QemuInputHandler wctablet_handler = {
+static const QemuInputHandler wctablet_handler = {
.name = "QEMU Wacom Pen Tablet",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
.event = wctablet_input_event,
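
The msmouse and wctablet hunks constify their QemuInputHandler tables: the
handler is only read after registration, so it can live in read-only data.
A sketch of the pattern for any input device (the mydev_* names are
illustrative):

    static const QemuInputHandler mydev_handler = {
        .name  = "My Input Device",
        .mask  = INPUT_EVENT_MASK_BTN,
        .event = mydev_input_event,   /* assumed callback */
        .sync  = mydev_input_sync,    /* assumed callback */
    };

    /* qemu_input_handler_register() now takes a const handler pointer */
    qemu_input_handler_register(dev, &mydev_handler);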
diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak
index 454eb35499..a5d9200382 100644
--- a/configs/targets/sparc-softmmu.mak
+++ b/configs/targets/sparc-softmmu.mak
@@ -1,2 +1,3 @@
TARGET_ARCH=sparc
TARGET_BIG_ENDIAN=y
+TARGET_SUPPORTS_MTTCG=y
diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak
index d3f8a3b710..36ca64ec41 100644
--- a/configs/targets/sparc64-softmmu.mak
+++ b/configs/targets/sparc64-softmmu.mak
@@ -1,3 +1,4 @@
TARGET_ARCH=sparc64
TARGET_BASE_ARCH=sparc
TARGET_BIG_ENDIAN=y
+TARGET_SUPPORTS_MTTCG=y
diff --git a/contrib/elf2dmp/addrspace.c b/contrib/elf2dmp/addrspace.c
index 64b5d680ad..6f608a517b 100644
--- a/contrib/elf2dmp/addrspace.c
+++ b/contrib/elf2dmp/addrspace.c
@@ -72,10 +72,7 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
}
}
- ps->block = malloc(sizeof(*ps->block) * ps->block_nr);
- if (!ps->block) {
- return 1;
- }
+ ps->block = g_new(struct pa_block, ps->block_nr);
for (i = 0; i < phdr_nr; i++) {
if (phdr[i].p_type == PT_LOAD) {
@@ -97,7 +94,7 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
void pa_space_destroy(struct pa_space *ps)
{
ps->block_nr = 0;
- free(ps->block);
+ g_free(ps->block);
}
void va_space_set_dtb(struct va_space *vs, uint64_t dtb)
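
The elf2dmp hunks replace malloc()/free() with GLib's g_malloc()/g_new()/
g_free(). g_malloc() aborts on allocation failure instead of returning NULL,
so the explicit out-of-memory checks become dead code and are dropped. The
before/after shape, abridged from the hunk above:

    /* before: every caller had to handle OOM */
    ps->block = malloc(sizeof(*ps->block) * ps->block_nr);
    if (!ps->block) {
        return 1;
    }

    /* after: g_new() never returns NULL */
    ps->block = g_new(struct pa_block, ps->block_nr);
    ...
    g_free(ps->block);    /* g_free(NULL) would also be a safe no-op */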
diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c
index 5db163bdbe..cbc38a7c10 100644
--- a/contrib/elf2dmp/main.c
+++ b/contrib/elf2dmp/main.c
@@ -120,14 +120,11 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb,
}
}
- kdbg = malloc(kdbg_hdr.Size);
- if (!kdbg) {
- return NULL;
- }
+ kdbg = g_malloc(kdbg_hdr.Size);
if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) {
eprintf("Failed to extract entire KDBG\n");
- free(kdbg);
+ g_free(kdbg);
return NULL;
}
@@ -478,7 +475,7 @@ static bool pe_check_pdb_name(uint64_t base, void *start_addr,
}
if (memcmp(&rsds->Signature, sign_rsds, sizeof(sign_rsds))) {
- eprintf("CodeView signature is \'%.4s\', \'%s\' expected\n",
+ eprintf("CodeView signature is \'%.4s\', \'%.4s\' expected\n",
rsds->Signature, sign_rsds);
return false;
}
@@ -643,7 +640,7 @@ int main(int argc, char *argv[])
}
out_kdbg:
- free(kdbg);
+ g_free(kdbg);
out_pdb:
pdb_exit(&pdb);
out_pdb_file:
diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c
index 6ca5086f02..40991f5f4c 100644
--- a/contrib/elf2dmp/pdb.c
+++ b/contrib/elf2dmp/pdb.c
@@ -25,6 +25,10 @@
static uint32_t pdb_get_file_size(const struct pdb_reader *r, unsigned idx)
{
+ if (idx >= r->ds.toc->num_files) {
+ return 0;
+ }
+
return r->ds.toc->file_size[idx];
}
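
Returning 0 for an out-of-range stream index folds the "missing stream" and
"empty stream" cases together; the pdb_init_segments() hunk below then treats
a zero size as failure, so a corrupted PDB bails out cleanly instead of
reading past the table of contents. Caller-side sketch:

    uint32_t size = pdb_get_file_size(r, stream_idx);
    if (!size) {
        return 1;    /* stream absent, empty, or index out of range */
    }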
@@ -90,18 +94,18 @@ uint64_t pdb_resolve(uint64_t img_base, struct pdb_reader *r, const char *name)
static void pdb_reader_ds_exit(struct pdb_reader *r)
{
- free(r->ds.toc);
+ g_free(r->ds.toc);
}
static void pdb_exit_symbols(struct pdb_reader *r)
{
- free(r->modimage);
- free(r->symbols);
+ g_free(r->modimage);
+ g_free(r->symbols);
}
static void pdb_exit_segments(struct pdb_reader *r)
{
- free(r->segs);
+ g_free(r->segs);
}
static void *pdb_ds_read(const PDB_DS_HEADER *header,
@@ -116,10 +120,7 @@ static void *pdb_ds_read(const PDB_DS_HEADER *header,
nBlocks = (size + header->block_size - 1) / header->block_size;
- buffer = malloc(nBlocks * header->block_size);
- if (!buffer) {
- return NULL;
- }
+ buffer = g_malloc(nBlocks * header->block_size);
for (i = 0; i < nBlocks; i++) {
memcpy(buffer + i * header->block_size, (const char *)header +
@@ -159,16 +160,17 @@ static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number)
static int pdb_init_segments(struct pdb_reader *r)
{
- char *segs;
unsigned stream_idx = r->segments;
- segs = pdb_ds_read_file(r, stream_idx);
- if (!segs) {
+ r->segs = pdb_ds_read_file(r, stream_idx);
+ if (!r->segs) {
return 1;
}
- r->segs = segs;
r->segs_size = pdb_get_file_size(r, stream_idx);
+ if (!r->segs_size) {
+ return 1;
+ }
return 0;
}
@@ -201,7 +203,7 @@ static int pdb_init_symbols(struct pdb_reader *r)
return 0;
out_symbols:
- free(symbols);
+ g_free(symbols);
return err;
}
@@ -258,7 +260,7 @@ static int pdb_reader_init(struct pdb_reader *r, void *data)
out_sym:
pdb_exit_symbols(r);
out_root:
- free(r->ds.root);
+ g_free(r->ds.root);
out_ds:
pdb_reader_ds_exit(r);
@@ -269,7 +271,7 @@ static void pdb_reader_exit(struct pdb_reader *r)
{
pdb_exit_segments(r);
pdb_exit_symbols(r);
- free(r->ds.root);
+ g_free(r->ds.root);
pdb_reader_ds_exit(r);
}
diff --git a/contrib/elf2dmp/qemu_elf.c b/contrib/elf2dmp/qemu_elf.c
index de6ad744c6..055e6f8792 100644
--- a/contrib/elf2dmp/qemu_elf.c
+++ b/contrib/elf2dmp/qemu_elf.c
@@ -94,10 +94,7 @@ static int init_states(QEMU_Elf *qe)
printf("%zu CPU states has been found\n", cpu_nr);
- qe->state = malloc(sizeof(*qe->state) * cpu_nr);
- if (!qe->state) {
- return 1;
- }
+ qe->state = g_new(QEMUCPUState*, cpu_nr);
cpu_nr = 0;
@@ -115,7 +112,7 @@ static int init_states(QEMU_Elf *qe)
static void exit_states(QEMU_Elf *qe)
{
- free(qe->state);
+ g_free(qe->state);
}
static bool check_ehdr(QEMU_Elf *qe)
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index 2febd2d12f..4e0eb2fe02 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -247,6 +247,14 @@ deprecated; use the new name ``dtb-randomness`` instead. The new name
better reflects the way this property affects all random data within
the device tree blob, not just the ``kaslr-seed`` node.
+``pc-i440fx-2.0`` up to ``pc-i440fx-2.3`` (since 8.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+These old machine types are quite neglected nowadays and thus might have
+various pitfalls with regard to live migration. Use a newer machine type
+instead.
+
+
Backend options
---------------
diff --git a/docs/devel/index-internals.rst b/docs/devel/index-internals.rst
index e1a93df263..6f81df92bc 100644
--- a/docs/devel/index-internals.rst
+++ b/docs/devel/index-internals.rst
@@ -14,6 +14,7 @@ Details about QEMU's various subsystems including how to add features to them.
migration
multi-process
reset
+ s390-cpu-topology
s390-dasd-ipl
tracing
vfio-migration
diff --git a/docs/devel/s390-cpu-topology.rst b/docs/devel/s390-cpu-topology.rst
new file mode 100644
index 0000000000..9eab28d5e5
--- /dev/null
+++ b/docs/devel/s390-cpu-topology.rst
@@ -0,0 +1,170 @@
+QAPI interface for S390 CPU topology
+====================================
+
+The following sections explain the QAPI interface for S390 CPU topology
+with the help of example output.
+For this, let's assume that QEMU has been started with the following
+command, defining 4 CPUs, where CPU[0] is defined by the -smp argument and will
+have default values:
+
+.. code-block:: bash
+
+ qemu-system-s390x \
+ -enable-kvm \
+ -cpu z14,ctop=on \
+ -smp 1,drawers=3,books=3,sockets=2,cores=2,maxcpus=36 \
+ -device z14-s390x-cpu,core-id=19,entitlement=high \
+ -device z14-s390x-cpu,core-id=11,entitlement=low \
+ -device z14-s390x-cpu,core-id=112,entitlement=high \
+ ...
+
+Additions to query-cpus-fast
+----------------------------
+
+The command query-cpus-fast allows querying the topology tree and
+modifiers for all configured vCPUs.
+
+.. code-block:: QMP
+
+ { "execute": "query-cpus-fast" }
+ {
+ "return": [
+ {
+ "dedicated": false,
+ "thread-id": 536993,
+ "props": {
+ "core-id": 0,
+ "socket-id": 0,
+ "drawer-id": 0,
+ "book-id": 0
+ },
+ "cpu-state": "operating",
+ "entitlement": "medium",
+ "qom-path": "/machine/unattached/device[0]",
+ "cpu-index": 0,
+ "target": "s390x"
+ },
+ {
+ "dedicated": false,
+ "thread-id": 537003,
+ "props": {
+ "core-id": 19,
+ "socket-id": 1,
+ "drawer-id": 0,
+ "book-id": 2
+ },
+ "cpu-state": "operating",
+ "entitlement": "high",
+ "qom-path": "/machine/peripheral-anon/device[0]",
+ "cpu-index": 19,
+ "target": "s390x"
+ },
+ {
+ "dedicated": false,
+ "thread-id": 537004,
+ "props": {
+ "core-id": 11,
+ "socket-id": 1,
+ "drawer-id": 0,
+ "book-id": 1
+ },
+ "cpu-state": "operating",
+ "entitlement": "low",
+ "qom-path": "/machine/peripheral-anon/device[1]",
+ "cpu-index": 11,
+ "target": "s390x"
+ },
+ {
+ "dedicated": true,
+ "thread-id": 537005,
+ "props": {
+ "core-id": 112,
+ "socket-id": 0,
+ "drawer-id": 3,
+ "book-id": 2
+ },
+ "cpu-state": "operating",
+ "entitlement": "high",
+ "qom-path": "/machine/peripheral-anon/device[2]",
+ "cpu-index": 112,
+ "target": "s390x"
+ }
+ ]
+ }
+
+
+QAPI command: set-cpu-topology
+------------------------------
+
+The command set-cpu-topology allows modifying the topology tree
+or the topology modifiers of a vCPU in the configuration.
+
+.. code-block:: QMP
+
+ { "execute": "set-cpu-topology",
+ "arguments": {
+ "core-id": 11,
+ "socket-id": 0,
+ "book-id": 0,
+ "drawer-id": 0,
+ "entitlement": "low",
+ "dedicated": false
+ }
+ }
+ {"return": {}}
+
+The core-id parameter is the only mandatory parameter and every
+unspecified parameter keeps its previous value.
+
+QAPI event CPU_POLARIZATION_CHANGE
+----------------------------------
+
+When a guest requests a modification of the polarization,
+QEMU sends a CPU_POLARIZATION_CHANGE event.
+
+When requesting the change, the guest only specifies horizontal or
+vertical polarization.
+It is the job of the entity administering QEMU to set the dedication and
+fine-grained vertical entitlement in response to this event.
+
+Note that a vertically polarized dedicated vCPU can only have a high
+entitlement, giving 6 possibilities for vCPU polarization:
+
+- Horizontal
+- Horizontal dedicated
+- Vertical low
+- Vertical medium
+- Vertical high
+- Vertical high dedicated
+
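
A sketch of the validity rule behind those six combinations (the types and
names here are illustrative, not QEMU's):

    /* entitlement: 0 = low, 1 = medium, 2 = high */
    static bool polarization_valid(bool vertical, bool dedicated,
                                   int entitlement)
    {
        if (!vertical) {
            return true;    /* horizontal: entitlement is not used */
        }
        /* a vertical dedicated vCPU can only be high-entitled */
        return !dedicated || entitlement == 2;
    }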
+Example of the event received when the guest issues the CPU instruction
+Perform Topology Function PTF(0) to request a horizontal polarization:
+
+.. code-block:: QMP
+
+ {
+ "timestamp": {
+ "seconds": 1687870305,
+ "microseconds": 566299
+ },
+ "event": "CPU_POLARIZATION_CHANGE",
+ "data": {
+ "polarization": "horizontal"
+ }
+ }
+
+QAPI query command: query-s390x-cpu-polarization
+------------------------------------------------
+
+The query command query-s390x-cpu-polarization returns the current
+CPU polarization of the machine.
+In this case the guest previously issued a PTF(1) to request vertical polarization:
+
+.. code-block:: QMP
+
+ { "execute": "query-s390x-cpu-polarization" }
+ {
+ "return": {
+ "polarization": "vertical"
+ }
+ }
diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst
index 415bb47a19..768fb5c28c 100644
--- a/docs/interop/vhost-user.rst
+++ b/docs/interop/vhost-user.rst
@@ -275,6 +275,16 @@ Inflight description
:queue size: a 16-bit size of virtqueues
+VhostUserShared
+^^^^^^^^^^^^^^^
+
++------+
+| UUID |
++------+
+
+:UUID: 16-byte UUID, whose first three components (a 32-bit value, then
+  two 16-bit values) are stored in big endian.
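
A sketch of that wire layout as a C view over the 16 bytes (the field names
are illustrative; the message payload itself is a flat 16-byte array):

    struct vhost_user_shared_uuid {
        uint32_t time_low;              /* big endian on the wire */
        uint16_t time_mid;              /* big endian */
        uint16_t time_hi_and_version;   /* big endian */
        uint8_t  clock_seq_and_node[8]; /* stored as-is */
    };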
+
C structure
-----------
@@ -885,6 +895,7 @@ Protocol features
#define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
#define VHOST_USER_PROTOCOL_F_STATUS 16
#define VHOST_USER_PROTOCOL_F_XEN_MMAP 17
+ #define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18
Front-end message types
-----------------------
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 965cbf84c5..47fd648035 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -46,6 +46,7 @@ the following architecture extensions:
- FEAT_HCX (Support for the HCRX_EL2 register)
- FEAT_HPDS (Hierarchical permission disables)
- FEAT_HPDS2 (Translation table page-based hardware attributes)
+- FEAT_HPMN0 (Setting of MDCR_EL2.HPMN to zero)
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
- FEAT_IDST (ID space trap handling)
- FEAT_IESB (Implicit error synchronization event)
diff --git a/docs/system/s390x/cpu-topology.rst b/docs/system/s390x/cpu-topology.rst
new file mode 100644
index 0000000000..5133fdc362
--- /dev/null
+++ b/docs/system/s390x/cpu-topology.rst
@@ -0,0 +1,244 @@
+.. _cpu-topology-s390x:
+
+CPU topology on s390x
+=====================
+
+Since QEMU 8.2, CPU topology on s390x provides up to 3 levels of
+topology containers: drawers, books and sockets. They define a
+tree-shaped hierarchy.
+
+The socket container has one or more CPU entries.
+Each of these CPU entries consists of a bitmap and three CPU attributes:
+
+- CPU type
+- entitlement
+- dedication
+
+Each bit set in the bitmap corresponds to a core-id of a vCPU with matching
+attributes.
+
+This documentation provides general information on S390 CPU topology,
+explains how to enable it, and describes the new CPU attributes.
+For information on how to modify the S390 CPU topology and how to
+monitor polarization changes, see ``docs/devel/s390-cpu-topology.rst``.
+
+Prerequisites
+-------------
+
+To use the CPU topology, you need to run with KVM on an s390x host that
+uses the Linux kernel v6.0 or newer (which provides the so-called
+``KVM_CAP_S390_CPU_TOPOLOGY`` capability that allows QEMU to signal the
+CPU topology facility via STFLE bit 11 to the VM).
+
+Enabling CPU topology
+---------------------
+
+Currently, CPU topology is only enabled in the host model by default.
+
+Enabling CPU topology in a CPU model is done by setting the CPU flag
+``ctop`` to ``on`` as in:
+
+.. code-block:: bash
+
+ -cpu gen16b,ctop=on
+
+Having the topology disabled by default allows migration between
+old and new QEMU without adding new flags.
+
+Default topology usage
+----------------------
+
+The CPU topology can be specified on the QEMU command line
+with the ``-smp`` or the ``-device`` QEMU command arguments.
+
+Note also that since 7.2 threads are no longer supported in the topology
+and the ``-smp`` command line argument accepts only ``threads=1``.
+
+If none of the container attributes (drawers, books, sockets) are
+specified for the ``-smp`` flag, the number of these containers
+is 1.
+
+Thus the following two options will result in the same topology:
+
+.. code-block:: bash
+
+ -smp cpus=5,drawers=1,books=1,sockets=8,cores=4,maxcpus=32
+
+and
+
+.. code-block:: bash
+
+ -smp cpus=5,sockets=8,cores=4,maxcpus=32
+
+When a CPU is defined by the ``-smp`` command argument, its position
+inside the topology is calculated by adding the CPUs to the topology
+based on the core-id starting with core-0 at position 0 of socket-0,
+book-0, drawer-0 and filling all CPUs of socket-0 before filling socket-1
+of book-0 and so on up to the last socket of the last book of the last
+drawer.
+
+When a CPU is defined by the ``-device`` command argument, the
+tree topology attributes must either all be defined or all be omitted.
+
+.. code-block:: bash
+
+ -device gen16b-s390x-cpu,drawer-id=1,book-id=1,socket-id=2,core-id=1
+
+or
+
+.. code-block:: bash
+
+ -device gen16b-s390x-cpu,core-id=1,dedicated=true
+
+If none of the tree attributes (drawer, book, socket) are specified
+for the ``-device`` argument, the topology tree attributes will be set,
+just as for CPUs defined with the ``-smp`` argument, by simply
+adding the CPUs to the topology based on the core-id.
+
+QEMU will not try to resolve collisions and will report an error if the
+CPU topology defined explicitly or implicitly on a ``-device``
+argument collides with the definition of a CPU implicitly defined
+on the ``-smp`` argument.
+
+When the topology modifier attributes are not defined for the
+``-device`` command argument, they take the following default values:
+
+- dedicated: ``false``
+- entitlement: ``medium``
+
+
+Hot plug
+++++++++
+
+New CPUs can be plugged using the device_add hmp command as in:
+
+.. code-block:: bash
+
+ (qemu) device_add gen16b-s390x-cpu,core-id=9
+
+The placement of the CPU is derived from the core-id as described above.
+
+The topology can of course also be fully defined:
+
+.. code-block:: bash
+
+ (qemu) device_add gen16b-s390x-cpu,drawer-id=1,book-id=1,socket-id=2,core-id=1
+
+
+Examples
+++++++++
+
+In the following machine we define 8 sockets with 4 cores each.
+
+.. code-block:: bash
+
+ $ qemu-system-s390x -m 2G \
+ -cpu gen16b,ctop=on \
+ -smp cpus=5,sockets=8,cores=4,maxcpus=32 \
+ -device host-s390x-cpu,core-id=14 \
+
+A new CPU can be plugged using the device_add hmp command as before:
+
+.. code-block:: bash
+
+ (qemu) device_add gen16b-s390x-cpu,core-id=9
+
+The core-id defines the placement of the core in the topology by
+starting with core 0 in socket 0 up to maxcpus.
+
+In the example above:
+
+* There are 5 CPUs provided to the guest with the ``-smp`` command line.
+  They will take the core-ids 0,1,2,3,4.
+  As we have 4 cores in a socket, we have 4 CPUs provided
+  to the guest in socket 0, with core-ids 0,1,2,3.
+  The last CPU, with core-id 4, will be on socket 1.
+
+* the core with ID 14 provided by the ``-device`` command line will
+ be placed in socket 3, with core-id 14
+
+* the core with ID 9 provided by the ``device_add`` qmp command will
+ be placed in socket 2, with core-id 9
+
+
+Polarization, entitlement and dedication
+----------------------------------------
+
+Polarization
+++++++++++++
+
+The polarization affects how the CPUs of a shared host are utilized and
+distributed among guests.
+The guest selects the polarization by using the PTF instruction.
+
+Polarization defines two models of CPU provisioning: horizontal
+and vertical.
+
+Horizontal polarization is the default model on boot and after
+subsystem reset. When horizontal polarization is in effect, all vCPUs should
+have about equal resource provisioning.
+
+In the vertical polarization model, vCPUs are unequal, but overall more
+resources might be available.
+The guest can make use of the vCPU entitlement information provided by the host
+to optimize kernel thread scheduling.
+
+A subsystem reset puts all vCPUs of the configuration back into
+horizontal polarization.
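+
+On a Linux guest, for instance, the polarization can typically be
+switched through sysfs, causing the guest kernel to issue the PTF
+instruction (the exact interface depends on the guest kernel):
+
+.. code-block:: bash
+
+ # 0 requests horizontal, 1 requests vertical polarization
+ echo 1 > /sys/devices/system/cpu/dispatching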
+
+Entitlement
++++++++++++
+
+Vertical polarization specifies that the guest's vCPUs can get
+different real CPU provisioning:
+
+- a vCPU with vertical high entitlement specifies that this
+ vCPU gets 100% of the real CPU provisioning.
+
+- a vCPU with vertical medium entitlement specifies that this
+ vCPU shares the real CPU with other vCPUs.
+
+- a vCPU with vertical low entitlement specifies that this
+ vCPU only gets real CPU provisioning when no other vCPU needs it.
+
+In case a vCPU with vertical high entitlement does not use
+the real CPU, the unused "slack" can be dispatched to other vCPUs
+with medium or low entitlement.
+
+A vCPU can be "dedicated" in which case the vCPU is fully dedicated to a single
+real CPU.
+
+The dedicated bit is an indication of affinity of a vCPU for a real CPU,
+while the entitlement indicates the sharing or exclusivity of use.
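+
+A dedicated vCPU with exclusive use of its real CPU would, for instance,
+be defined as follows (core-id 1 chosen arbitrarily; the full command
+line in the next section shows the same attributes):
+
+.. code-block:: bash
+
+ -device gen16b-s390x-cpu,core-id=1,dedicated=true,entitlement=high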
+
+Defining the topology on the command line
+-----------------------------------------
+
+The topology can be entirely defined using ``-device`` cpu statements,
+with the exception of CPU 0, which must be defined with the ``-smp``
+argument.
+
+For example, here we set the position of cores 1, 2 and 3 to
+drawer 1, book 1, socket 2, and cores 0, 9 and 14 to drawer 0,
+book 0, socket 0, without defining entitlement or dedication.
+Core 4 will be set at its default position in socket 1
+(since we have 4 cores per socket), and we define it as dedicated and
+with vertical high entitlement.
+
+.. code-block:: bash
+
+ $ qemu-system-s390x -m 2G \
+ -cpu gen16b,ctop=on \
+ -smp cpus=1,sockets=8,cores=4,maxcpus=32 \
+ \
+ -device gen16b-s390x-cpu,drawer-id=1,book-id=1,socket-id=2,core-id=1 \
+ -device gen16b-s390x-cpu,drawer-id=1,book-id=1,socket-id=2,core-id=2 \
+ -device gen16b-s390x-cpu,drawer-id=1,book-id=1,socket-id=2,core-id=3 \
+ \
+ -device gen16b-s390x-cpu,drawer-id=0,book-id=0,socket-id=0,core-id=9 \
+ -device gen16b-s390x-cpu,drawer-id=0,book-id=0,socket-id=0,core-id=14 \
+ \
+ -device gen16b-s390x-cpu,core-id=4,dedicated=on,entitlement=high
+
+The entitlement defined for CPU 4 will only be used after the guest
+successfully enables vertical polarization by using the PTF instruction.
diff --git a/docs/system/target-i386-desc.rst.inc b/docs/system/target-i386-desc.rst.inc
index 7d1fffacbe..5ebbcda9db 100644
--- a/docs/system/target-i386-desc.rst.inc
+++ b/docs/system/target-i386-desc.rst.inc
@@ -71,3 +71,11 @@ machine property, i.e.
|qemu_system_x86| some.img \
-audiodev <backend>,id=<name> \
-machine pcspk-audiodev=<name>
+
+Machine-specific options
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The PC machine types support the following machine-specific options:
+
+- ``x-south-bridge=PIIX3|piix4-isa`` (Experimental option to select a particular
+ south bridge. Default: ``PIIX3``)
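+
+For example, a ``pc`` machine could be started with the PIIX4 south
+bridge as follows (a sketch; the option is experimental and its name
+may change):
+
+.. parsed-literal::
+
+ |qemu_system_x86| -machine pc,x-south-bridge=piix4-isa some.img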
diff --git a/docs/system/target-s390x.rst b/docs/system/target-s390x.rst
index f6f11433c7..94c981e732 100644
--- a/docs/system/target-s390x.rst
+++ b/docs/system/target-s390x.rst
@@ -34,3 +34,4 @@ Architectural features
.. toctree::
s390x/bootdevices
s390x/protvirt
+ s390x/cpu-topology
diff --git a/dump/dump.c b/dump/dump.c
index d3578ddc62..d355ada62e 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -111,7 +111,7 @@ static int dump_cleanup(DumpState *s)
qemu_mutex_unlock_iothread();
}
}
- migrate_del_blocker(dump_migration_blocker);
+ migrate_del_blocker(&dump_migration_blocker);
return 0;
}
@@ -2158,7 +2158,7 @@ void qmp_dump_guest_memory(bool paging, const char *file,
* Allows even for -only-migratable, but forbid migration during the
* process of dump guest memory.
*/
- if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
+ if (migrate_add_blocker_internal(&dump_migration_blocker, errp)) {
/* Remember to release the fd before passing it over to dump state */
close(fd);
return;
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 323f042e65..af636cfb2d 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -406,11 +406,7 @@ static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
* delete the migration blocker. Ideally, this
* should be hooked to transport close notification
*/
- if (pdu->s->migration_blocker) {
- migrate_del_blocker(pdu->s->migration_blocker);
- error_free(pdu->s->migration_blocker);
- pdu->s->migration_blocker = NULL;
- }
+ migrate_del_blocker(&pdu->s->migration_blocker);
}
return free_fid(pdu, fidp);
}
@@ -1505,10 +1501,8 @@ static void coroutine_fn v9fs_attach(void *opaque)
error_setg(&s->migration_blocker,
"Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
- err = migrate_add_blocker(s->migration_blocker, NULL);
+ err = migrate_add_blocker(&s->migration_blocker, NULL);
if (err < 0) {
- error_free(s->migration_blocker);
- s->migration_blocker = NULL;
clunk_fid(s, fid);
goto out;
}
diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c
index 92b46bc932..9cd7905ea2 100644
--- a/hw/acpi/cxl.c
+++ b/hw/acpi/cxl.c
@@ -30,6 +30,75 @@
#include "qapi/error.h"
#include "qemu/uuid.h"
+void build_cxl_dsm_method(Aml *dev)
+{
+ Aml *method, *ifctx, *ifctx2;
+
+ method = aml_method("_DSM", 4, AML_SERIALIZED);
+ {
+ Aml *function, *uuid;
+
+ uuid = aml_arg(0);
+ function = aml_arg(2);
+ /* CXL spec v3.0 9.17.3.1 _DSM Function for Retrieving QTG ID */
+ ifctx = aml_if(aml_equal(
+ uuid, aml_touuid("F365F9A6-A7DE-4071-A66A-B40C0B4F8E52")));
+
+ /* Function 0, standard DSM query function */
+ ifctx2 = aml_if(aml_equal(function, aml_int(0)));
+ {
+ uint8_t byte_list[1] = { 0x01 }; /* function 1 only */
+
+ aml_append(ifctx2,
+ aml_return(aml_buffer(sizeof(byte_list), byte_list)));
+ }
+ aml_append(ifctx, ifctx2);
+
+ /*
+ * Function 1
+ * Creating a package with static values. The max supported QTG ID will
+ * be 1 and recommended QTG IDs are 0 and then 1.
+ * The values here are statically created to simplify emulation. Values
+ * from a real BIOS would be determined by the performance of all the
+ * present CXL memory and then assigned.
+ */
+ ifctx2 = aml_if(aml_equal(function, aml_int(1)));
+ {
+ Aml *pak, *pak1;
+
+ /*
+ * Return: A package containing two elements - a WORD that returns
+ * the maximum throttling group that the platform supports, and a
+ * package containing the QTG ID(s) that the platform recommends.
+ * Package {
+ * Max Supported QTG ID
+ * Package {QTG Recommendations}
+ * }
+ *
+             * While the spec specifies a WORD, hinting that the value is
+             * 16-bit, the ACPI dump of a BIOS DSDT table showed that the values
+ * are integers with no specific size specification. aml_int() will
+ * be used for the values.
+ */
+ pak1 = aml_package(2);
+ /* Set QTG ID of 0 */
+ aml_append(pak1, aml_int(0));
+ /* Set QTG ID of 1 */
+ aml_append(pak1, aml_int(1));
+
+ pak = aml_package(2);
+ /* Set Max QTG 1 */
+ aml_append(pak, aml_int(1));
+ aml_append(pak, pak1);
+
+ aml_append(ifctx2, aml_return(pak));
+ }
+ aml_append(ifctx, ifctx2);
+ }
+ aml_append(method, ifctx);
+ aml_append(dev, method);
+}
+
static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl)
{
PXBDev *pxb = PXB_DEV(cxl);
diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
index cdd6f775a1..4f75c873e2 100644
--- a/hw/acpi/pcihp.c
+++ b/hw/acpi/pcihp.c
@@ -496,8 +496,7 @@ static const MemoryRegionOps acpi_pcihp_io_ops = {
};
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
- MemoryRegion *address_space_io,
- uint16_t io_base)
+ MemoryRegion *io, uint16_t io_base)
{
s->io_len = ACPI_PCIHP_SIZE;
s->io_base = io_base;
@@ -506,7 +505,7 @@ void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s,
"acpi-pci-hotplug", s->io_len);
- memory_region_add_subregion(address_space_io, s->io_base, &s->io);
+ memory_region_add_subregion(io, s->io_base, &s->io);
object_property_add_uint16_ptr(owner, ACPI_PCIHP_IO_BASE_PROP, &s->io_base,
OBJ_PROP_FLAG_READ);
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index f8ba67531a..cc59176563 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -40,7 +40,7 @@ struct AspeedMachineState {
MachineState parent_obj;
/* Public */
- AspeedSoCState soc;
+ AspeedSoCState *soc;
MemoryRegion boot_rom;
bool mmio_exec;
uint32_t uart_chosen;
@@ -288,7 +288,7 @@ static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size,
static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk,
uint64_t rom_size)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
memory_region_init_rom(&bmc->boot_rom, NULL, "aspeed.boot_rom", rom_size,
&error_abort);
@@ -337,7 +337,7 @@ static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo)
static void connect_serial_hds_to_uarts(AspeedMachineState *bmc)
{
AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc);
- AspeedSoCState *s = &bmc->soc;
+ AspeedSoCState *s = bmc->soc;
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
int uart_chosen = bmc->uart_chosen ? bmc->uart_chosen : amc->uart_default;
@@ -358,32 +358,33 @@ static void aspeed_machine_init(MachineState *machine)
int i;
NICInfo *nd = &nd_table[0];
- object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name);
-
- sc = ASPEED_SOC_GET_CLASS(&bmc->soc);
+ bmc->soc = ASPEED_SOC(object_new(amc->soc_name));
+ object_property_add_child(OBJECT(machine), "soc", OBJECT(bmc->soc));
+ object_unref(OBJECT(bmc->soc));
+ sc = ASPEED_SOC_GET_CLASS(bmc->soc);
/*
* This will error out if the RAM size is not supported by the
* memory controller of the SoC.
*/
- object_property_set_uint(OBJECT(&bmc->soc), "ram-size", machine->ram_size,
+ object_property_set_uint(OBJECT(bmc->soc), "ram-size", machine->ram_size,
&error_fatal);
for (i = 0; i < sc->macs_num; i++) {
if ((amc->macs_mask & (1 << i)) && nd->used) {
qemu_check_nic_model(nd, TYPE_FTGMAC100);
- qdev_set_nic_properties(DEVICE(&bmc->soc.ftgmac100[i]), nd);
+ qdev_set_nic_properties(DEVICE(&bmc->soc->ftgmac100[i]), nd);
nd++;
}
}
- object_property_set_int(OBJECT(&bmc->soc), "hw-strap1", amc->hw_strap1,
+ object_property_set_int(OBJECT(bmc->soc), "hw-strap1", amc->hw_strap1,
&error_abort);
- object_property_set_int(OBJECT(&bmc->soc), "hw-strap2", amc->hw_strap2,
+ object_property_set_int(OBJECT(bmc->soc), "hw-strap2", amc->hw_strap2,
&error_abort);
- object_property_set_link(OBJECT(&bmc->soc), "memory",
+ object_property_set_link(OBJECT(bmc->soc), "memory",
OBJECT(get_system_memory()), &error_abort);
- object_property_set_link(OBJECT(&bmc->soc), "dram",
+ object_property_set_link(OBJECT(bmc->soc), "dram",
OBJECT(machine->ram), &error_abort);
if (machine->kernel_filename) {
/*
@@ -391,17 +392,17 @@ static void aspeed_machine_init(MachineState *machine)
* that runs to unlock the SCU. In this case set the default to
* be unlocked as the kernel expects
*/
- object_property_set_int(OBJECT(&bmc->soc), "hw-prot-key",
+ object_property_set_int(OBJECT(bmc->soc), "hw-prot-key",
ASPEED_SCU_PROT_KEY, &error_abort);
}
connect_serial_hds_to_uarts(bmc);
- qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(bmc->soc), NULL, &error_abort);
if (defaults_enabled()) {
- aspeed_board_init_flashes(&bmc->soc.fmc,
+ aspeed_board_init_flashes(&bmc->soc->fmc,
bmc->fmc_model ? bmc->fmc_model : amc->fmc_model,
amc->num_cs, 0);
- aspeed_board_init_flashes(&bmc->soc.spi[0],
+ aspeed_board_init_flashes(&bmc->soc->spi[0],
bmc->spi_model ? bmc->spi_model : amc->spi_model,
1, amc->num_cs);
}
@@ -426,22 +427,22 @@ static void aspeed_machine_init(MachineState *machine)
amc->i2c_init(bmc);
}
- for (i = 0; i < bmc->soc.sdhci.num_slots; i++) {
- sdhci_attach_drive(&bmc->soc.sdhci.slots[i],
+ for (i = 0; i < bmc->soc->sdhci.num_slots; i++) {
+ sdhci_attach_drive(&bmc->soc->sdhci.slots[i],
drive_get(IF_SD, 0, i));
}
- if (bmc->soc.emmc.num_slots) {
- sdhci_attach_drive(&bmc->soc.emmc.slots[0],
- drive_get(IF_SD, 0, bmc->soc.sdhci.num_slots));
+ if (bmc->soc->emmc.num_slots) {
+ sdhci_attach_drive(&bmc->soc->emmc.slots[0],
+ drive_get(IF_SD, 0, bmc->soc->sdhci.num_slots));
}
if (!bmc->mmio_exec) {
- DeviceState *dev = ssi_get_cs(bmc->soc.fmc.spi, 0);
+ DeviceState *dev = ssi_get_cs(bmc->soc->fmc.spi, 0);
BlockBackend *fmc0 = dev ? m25p80_get_blk(dev) : NULL;
if (fmc0) {
- uint64_t rom_size = memory_region_size(&bmc->soc.spi_boot);
+ uint64_t rom_size = memory_region_size(&bmc->soc->spi_boot);
aspeed_install_boot_rom(bmc, fmc0, rom_size);
}
}
@@ -451,7 +452,7 @@ static void aspeed_machine_init(MachineState *machine)
static void palmetto_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
DeviceState *dev;
uint8_t *eeprom_buf = g_malloc0(32 * 1024);
@@ -473,7 +474,7 @@ static void palmetto_bmc_i2c_init(AspeedMachineState *bmc)
static void quanta_q71l_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
/*
* The quanta-q71l platform expects tmp75s which are compatible with
@@ -505,7 +506,7 @@ static void quanta_q71l_bmc_i2c_init(AspeedMachineState *bmc)
static void ast2500_evb_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
uint8_t *eeprom_buf = g_malloc0(8 * 1024);
smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 3), 0x50,
@@ -518,7 +519,7 @@ static void ast2500_evb_i2c_init(AspeedMachineState *bmc)
static void ast2600_evb_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
uint8_t *eeprom_buf = g_malloc0(8 * 1024);
smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 7), 0x50,
@@ -531,7 +532,7 @@ static void ast2600_evb_i2c_init(AspeedMachineState *bmc)
static void yosemitev2_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x51, 128 * KiB);
at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 128 * KiB,
@@ -545,7 +546,7 @@ static void yosemitev2_bmc_i2c_init(AspeedMachineState *bmc)
static void romulus_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
/* The romulus board expects Epson RX8900 I2C RTC but a ds1338 is
* good enough */
@@ -554,7 +555,7 @@ static void romulus_bmc_i2c_init(AspeedMachineState *bmc)
static void tiogapass_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, 128 * KiB);
at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 6), 0x54, 128 * KiB,
@@ -573,7 +574,7 @@ static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr)
static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
/* bus 2 : */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), "tmp105", 0x48);
@@ -627,7 +628,7 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc)
{14, LED_COLOR_GREEN, "front-power-3", GPIO_POLARITY_ACTIVE_LOW},
{15, LED_COLOR_GREEN, "front-id-5", GPIO_POLARITY_ACTIVE_LOW},
};
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
uint8_t *eeprom_buf = g_malloc0(8 * 1024);
DeviceState *dev;
LEDState *led;
@@ -672,7 +673,7 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc)
static void g220a_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
DeviceState *dev;
dev = DEVICE(i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3),
@@ -708,7 +709,7 @@ static void g220a_bmc_i2c_init(AspeedMachineState *bmc)
static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
I2CSlave *i2c_mux;
/* The at24c256 */
@@ -735,7 +736,7 @@ static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc)
static void rainier_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
I2CSlave *i2c_mux;
at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 0), 0x51, 32 * KiB);
@@ -852,7 +853,7 @@ static void get_pca9548_channels(I2CBus *bus, uint8_t mux_addr,
static void fuji_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
I2CBus *i2c[144] = {};
for (int i = 0; i < 16; i++) {
@@ -930,7 +931,7 @@ static void fuji_bmc_i2c_init(AspeedMachineState *bmc)
static void bletchley_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
I2CBus *i2c[13] = {};
for (int i = 0; i < 13; i++) {
if ((i == 8) || (i == 11)) {
@@ -976,7 +977,7 @@ static void bletchley_bmc_i2c_init(AspeedMachineState *bmc)
static void fby35_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
I2CBus *i2c[16];
for (int i = 0; i < 16; i++) {
@@ -1008,14 +1009,14 @@ static void fby35_i2c_init(AspeedMachineState *bmc)
static void qcom_dc_scm_bmc_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 15), "tmp105", 0x4d);
}
static void qcom_dc_scm_firework_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
I2CSlave *therm_mux, *cpuvr_mux;
/* Create the generic DC-SCM hardware */
@@ -1477,7 +1478,7 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data)
static void fby35_reset(MachineState *state, ShutdownCause reason)
{
AspeedMachineState *bmc = ASPEED_MACHINE(state);
- AspeedGPIOState *gpio = &bmc->soc.gpio;
+ AspeedGPIOState *gpio = &bmc->soc->gpio;
qemu_devices_reset(reason);
@@ -1528,24 +1529,26 @@ static void aspeed_minibmc_machine_init(MachineState *machine)
sysclk = clock_new(OBJECT(machine), "SYSCLK");
clock_set_hz(sysclk, SYSCLK_FRQ);
- object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name);
- qdev_connect_clock_in(DEVICE(&bmc->soc), "sysclk", sysclk);
+ bmc->soc = ASPEED_SOC(object_new(amc->soc_name));
+ object_property_add_child(OBJECT(machine), "soc", OBJECT(bmc->soc));
+ object_unref(OBJECT(bmc->soc));
+ qdev_connect_clock_in(DEVICE(bmc->soc), "sysclk", sysclk);
- object_property_set_link(OBJECT(&bmc->soc), "memory",
+ object_property_set_link(OBJECT(bmc->soc), "memory",
OBJECT(get_system_memory()), &error_abort);
connect_serial_hds_to_uarts(bmc);
- qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(bmc->soc), NULL, &error_abort);
- aspeed_board_init_flashes(&bmc->soc.fmc,
+ aspeed_board_init_flashes(&bmc->soc->fmc,
bmc->fmc_model ? bmc->fmc_model : amc->fmc_model,
amc->num_cs,
0);
- aspeed_board_init_flashes(&bmc->soc.spi[0],
+ aspeed_board_init_flashes(&bmc->soc->spi[0],
bmc->spi_model ? bmc->spi_model : amc->spi_model,
amc->num_cs, amc->num_cs);
- aspeed_board_init_flashes(&bmc->soc.spi[1],
+ aspeed_board_init_flashes(&bmc->soc->spi[1],
bmc->spi_model ? bmc->spi_model : amc->spi_model,
amc->num_cs, (amc->num_cs * 2));
@@ -1561,7 +1564,7 @@ static void aspeed_minibmc_machine_init(MachineState *machine)
static void ast1030_evb_i2c_init(AspeedMachineState *bmc)
{
- AspeedSoCState *soc = &bmc->soc;
+ AspeedSoCState *soc = bmc->soc;
/* U10 24C08 connects to SDA/SCL Group 1 by default */
uint8_t *eeprom_buf = g_malloc0(32 * 1024);
diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c
index 649b3b13c1..8becb146a8 100644
--- a/hw/arm/aspeed_ast10x0.c
+++ b/hw/arm/aspeed_ast10x0.c
@@ -101,13 +101,15 @@ static const int aspeed_soc_ast1030_irqmap[] = {
static qemu_irq aspeed_soc_ast1030_get_irq(AspeedSoCState *s, int dev)
{
+ Aspeed10x0SoCState *a = ASPEED10X0_SOC(s);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- return qdev_get_gpio_in(DEVICE(&s->armv7m), sc->irqmap[dev]);
+ return qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[dev]);
}
static void aspeed_soc_ast1030_init(Object *obj)
{
+ Aspeed10x0SoCState *a = ASPEED10X0_SOC(obj);
AspeedSoCState *s = ASPEED_SOC(obj);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
char socname[8];
@@ -118,7 +120,7 @@ static void aspeed_soc_ast1030_init(Object *obj)
g_assert_not_reached();
}
- object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M);
+ object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M);
s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
@@ -185,6 +187,7 @@ static void aspeed_soc_ast1030_init(Object *obj)
static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp)
{
+ Aspeed10x0SoCState *a = ASPEED10X0_SOC(dev_soc);
AspeedSoCState *s = ASPEED_SOC(dev_soc);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
DeviceState *armv7m;
@@ -206,17 +209,17 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp)
0x40000);
/* AST1030 CPU Core */
- armv7m = DEVICE(&s->armv7m);
+ armv7m = DEVICE(&a->armv7m);
qdev_prop_set_uint32(armv7m, "num-irq", 256);
qdev_prop_set_string(armv7m, "cpu-type", sc->cpu_type);
qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk);
- object_property_set_link(OBJECT(&s->armv7m), "memory",
+ object_property_set_link(OBJECT(&a->armv7m), "memory",
OBJECT(s->memory), &error_abort);
- sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort);
/* Internal SRAM */
sram_name = g_strdup_printf("aspeed.sram.%d",
- CPU(s->armv7m.cpu)->cpu_index);
+ CPU(a->armv7m.cpu)->cpu_index);
memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err);
if (err != NULL) {
error_propagate(errp, err);
@@ -249,7 +252,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp)
}
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]);
for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) {
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->armv7m),
+ qemu_irq irq = qdev_get_gpio_in(DEVICE(&a->armv7m),
sc->irqmap[ASPEED_DEV_I2C] + i);
/* The AST1030 I2C controller has one IRQ per bus. */
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq);
@@ -261,7 +264,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp)
}
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]);
for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) {
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->armv7m),
+ qemu_irq irq = qdev_get_gpio_in(DEVICE(&a->armv7m),
sc->irqmap[ASPEED_DEV_I3C] + i);
/* The AST1030 I3C controller has one IRQ per bus. */
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq);
@@ -290,19 +293,19 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp)
* On the AST1030 LPC subdevice IRQs are connected straight to the GIC.
*/
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1,
- qdev_get_gpio_in(DEVICE(&s->armv7m),
+ qdev_get_gpio_in(DEVICE(&a->armv7m),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2,
- qdev_get_gpio_in(DEVICE(&s->armv7m),
+ qdev_get_gpio_in(DEVICE(&a->armv7m),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3,
- qdev_get_gpio_in(DEVICE(&s->armv7m),
+ qdev_get_gpio_in(DEVICE(&a->armv7m),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4,
- qdev_get_gpio_in(DEVICE(&s->armv7m),
+ qdev_get_gpio_in(DEVICE(&a->armv7m),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4));
/* UART */
@@ -435,18 +438,18 @@ static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data)
sc->get_irq = aspeed_soc_ast1030_get_irq;
}
-static const TypeInfo aspeed_soc_ast1030_type_info = {
- .name = "ast1030-a1",
- .parent = TYPE_ASPEED_SOC,
- .instance_size = sizeof(AspeedSoCState),
- .instance_init = aspeed_soc_ast1030_init,
- .class_init = aspeed_soc_ast1030_class_init,
- .class_size = sizeof(AspeedSoCClass),
+static const TypeInfo aspeed_soc_ast10x0_types[] = {
+ {
+ .name = TYPE_ASPEED10X0_SOC,
+ .parent = TYPE_ASPEED_SOC,
+ .instance_size = sizeof(Aspeed10x0SoCState),
+ .abstract = true,
+ }, {
+ .name = "ast1030-a1",
+ .parent = TYPE_ASPEED10X0_SOC,
+ .instance_init = aspeed_soc_ast1030_init,
+ .class_init = aspeed_soc_ast1030_class_init,
+ },
};
-static void aspeed_soc_register_types(void)
-{
- type_register_static(&aspeed_soc_ast1030_type_info);
-}
-
-type_init(aspeed_soc_register_types)
+DEFINE_TYPES(aspeed_soc_ast10x0_types)
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_ast2400.c
index bf22258de9..a4334c81b8 100644
--- a/hw/arm/aspeed_soc.c
+++ b/hw/arm/aspeed_ast2400.c
@@ -135,13 +135,15 @@ static const int aspeed_soc_ast2400_irqmap[] = {
static qemu_irq aspeed_soc_ast2400_get_irq(AspeedSoCState *s, int dev)
{
+ Aspeed2400SoCState *a = ASPEED2400_SOC(s);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- return qdev_get_gpio_in(DEVICE(&s->vic), sc->irqmap[dev]);
+ return qdev_get_gpio_in(DEVICE(&a->vic), sc->irqmap[dev]);
}
-static void aspeed_soc_init(Object *obj)
+static void aspeed_ast2400_soc_init(Object *obj)
{
+ Aspeed2400SoCState *a = ASPEED2400_SOC(obj);
AspeedSoCState *s = ASPEED_SOC(obj);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
int i;
@@ -153,7 +155,7 @@ static void aspeed_soc_init(Object *obj)
}
for (i = 0; i < sc->num_cpus; i++) {
- object_initialize_child(obj, "cpu[*]", &s->cpu[i], sc->cpu_type);
+ object_initialize_child(obj, "cpu[*]", &a->cpu[i], sc->cpu_type);
}
snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname);
@@ -167,7 +169,7 @@ static void aspeed_soc_init(Object *obj)
object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu),
"hw-prot-key");
- object_initialize_child(obj, "vic", &s->vic, TYPE_ASPEED_VIC);
+ object_initialize_child(obj, "vic", &a->vic, TYPE_ASPEED_VIC);
object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC);
@@ -239,9 +241,10 @@ static void aspeed_soc_init(Object *obj)
object_initialize_child(obj, "video", &s->video, TYPE_UNIMPLEMENTED_DEVICE);
}
-static void aspeed_soc_realize(DeviceState *dev, Error **errp)
+static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp)
{
int i;
+ Aspeed2400SoCState *a = ASPEED2400_SOC(dev);
AspeedSoCState *s = ASPEED_SOC(dev);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
Error *err = NULL;
@@ -264,15 +267,15 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
/* CPU */
for (i = 0; i < sc->num_cpus; i++) {
- object_property_set_link(OBJECT(&s->cpu[i]), "memory",
+ object_property_set_link(OBJECT(&a->cpu[i]), "memory",
OBJECT(s->memory), &error_abort);
- if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) {
+ if (!qdev_realize(DEVICE(&a->cpu[i]), NULL, errp)) {
return;
}
}
/* SRAM */
- sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&s->cpu[0])->cpu_index);
+ sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index);
memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err);
if (err) {
error_propagate(errp, err);
@@ -288,14 +291,14 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]);
/* VIC */
- if (!sysbus_realize(SYS_BUS_DEVICE(&s->vic), errp)) {
+ if (!sysbus_realize(SYS_BUS_DEVICE(&a->vic), errp)) {
return;
}
- aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->vic), 0, sc->memmap[ASPEED_DEV_VIC]);
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0,
- qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ));
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1,
- qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ));
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->vic), 0, sc->memmap[ASPEED_DEV_VIC]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->vic), 0,
+ qdev_get_gpio_in(DEVICE(&a->cpu), ARM_CPU_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&a->vic), 1,
+ qdev_get_gpio_in(DEVICE(&a->cpu), ARM_CPU_FIQ));
/* RTC */
if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) {
@@ -497,36 +500,15 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0,
aspeed_soc_get_irq(s, ASPEED_DEV_HACE));
}
-static Property aspeed_soc_properties[] = {
- DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION,
- MemoryRegion *),
- DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION,
- MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
-};
-static void aspeed_soc_class_init(ObjectClass *oc, void *data)
+static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data)
{
+ AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
- dc->realize = aspeed_soc_realize;
+ dc->realize = aspeed_ast2400_soc_realize;
/* Reason: Uses serial_hds and nd_table in realize() directly */
dc->user_creatable = false;
- device_class_set_props(dc, aspeed_soc_properties);
-}
-
-static const TypeInfo aspeed_soc_type_info = {
- .name = TYPE_ASPEED_SOC,
- .parent = TYPE_DEVICE,
- .instance_size = sizeof(AspeedSoCState),
- .class_size = sizeof(AspeedSoCClass),
- .class_init = aspeed_soc_class_init,
- .abstract = true,
-};
-
-static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data)
-{
- AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc);
sc->name = "ast2400-a1";
sc->cpu_type = ARM_CPU_TYPE_NAME("arm926");
@@ -543,17 +525,14 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data)
sc->get_irq = aspeed_soc_ast2400_get_irq;
}
-static const TypeInfo aspeed_soc_ast2400_type_info = {
- .name = "ast2400-a1",
- .parent = TYPE_ASPEED_SOC,
- .instance_init = aspeed_soc_init,
- .instance_size = sizeof(AspeedSoCState),
- .class_init = aspeed_soc_ast2400_class_init,
-};
-
static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data)
{
AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = aspeed_ast2400_soc_realize;
+ /* Reason: Uses serial_hds and nd_table in realize() directly */
+ dc->user_creatable = false;
sc->name = "ast2500-a1";
sc->cpu_type = ARM_CPU_TYPE_NAME("arm1176");
@@ -570,114 +549,22 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data)
sc->get_irq = aspeed_soc_ast2400_get_irq;
}
-static const TypeInfo aspeed_soc_ast2500_type_info = {
- .name = "ast2500-a1",
- .parent = TYPE_ASPEED_SOC,
- .instance_init = aspeed_soc_init,
- .instance_size = sizeof(AspeedSoCState),
- .class_init = aspeed_soc_ast2500_class_init,
-};
-static void aspeed_soc_register_types(void)
-{
- type_register_static(&aspeed_soc_type_info);
- type_register_static(&aspeed_soc_ast2400_type_info);
- type_register_static(&aspeed_soc_ast2500_type_info);
+static const TypeInfo aspeed_soc_ast2400_types[] = {
+ {
+ .name = TYPE_ASPEED2400_SOC,
+ .parent = TYPE_ASPEED_SOC,
+ .instance_init = aspeed_ast2400_soc_init,
+ .instance_size = sizeof(Aspeed2400SoCState),
+ .abstract = true,
+ }, {
+ .name = "ast2400-a1",
+ .parent = TYPE_ASPEED2400_SOC,
+ .class_init = aspeed_soc_ast2400_class_init,
+ }, {
+ .name = "ast2500-a1",
+ .parent = TYPE_ASPEED2400_SOC,
+ .class_init = aspeed_soc_ast2500_class_init,
+ },
};
-type_init(aspeed_soc_register_types);
-
-qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev)
-{
- return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev);
-}
-
-bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp)
-{
- AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- SerialMM *smm;
-
- for (int i = 0, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) {
- smm = &s->uart[i];
-
- /* Chardev property is set by the machine. */
- qdev_prop_set_uint8(DEVICE(smm), "regshift", 2);
- qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400);
- qdev_set_legacy_instance_id(DEVICE(smm), sc->memmap[uart], 2);
- qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN);
- if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) {
- return false;
- }
-
- sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, aspeed_soc_get_irq(s, uart));
- aspeed_mmio_map(s, SYS_BUS_DEVICE(smm), 0, sc->memmap[uart]);
- }
-
- return true;
-}
-
-void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr)
-{
- AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- int i = dev - ASPEED_DEV_UART1;
-
- g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num);
- qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr);
-}
-
-/*
- * SDMC should be realized first to get correct RAM size and max size
- * values
- */
-bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp)
-{
- AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- ram_addr_t ram_size, max_ram_size;
-
- ram_size = object_property_get_uint(OBJECT(&s->sdmc), "ram-size",
- &error_abort);
- max_ram_size = object_property_get_uint(OBJECT(&s->sdmc), "max-ram-size",
- &error_abort);
-
- memory_region_init(&s->dram_container, OBJECT(s), "ram-container",
- max_ram_size);
- memory_region_add_subregion(&s->dram_container, 0, s->dram_mr);
-
- /*
- * Add a memory region beyond the RAM region to let firmwares scan
- * the address space with load/store and guess how much RAM the
- * SoC has.
- */
- if (ram_size < max_ram_size) {
- DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE);
-
- qdev_prop_set_string(dev, "name", "ram-empty");
- qdev_prop_set_uint64(dev, "size", max_ram_size - ram_size);
- if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp)) {
- return false;
- }
-
- memory_region_add_subregion_overlap(&s->dram_container, ram_size,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0), -1000);
- }
-
- memory_region_add_subregion(s->memory,
- sc->memmap[ASPEED_DEV_SDRAM], &s->dram_container);
- return true;
-}
-
-void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr)
-{
- memory_region_add_subregion(s->memory, addr,
- sysbus_mmio_get_region(dev, n));
-}
-
-void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev,
- const char *name, hwaddr addr, uint64_t size)
-{
- qdev_prop_set_string(DEVICE(dev), "name", name);
- qdev_prop_set_uint64(DEVICE(dev), "size", size);
- sysbus_realize(dev, &error_abort);
-
- memory_region_add_subregion_overlap(s->memory, addr,
- sysbus_mmio_get_region(dev, 0), -1000);
-}
+DEFINE_TYPES(aspeed_soc_ast2400_types)
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index e122e1c32d..b965fbab5e 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -137,13 +137,15 @@ static const int aspeed_soc_ast2600_irqmap[] = {
static qemu_irq aspeed_soc_ast2600_get_irq(AspeedSoCState *s, int dev)
{
+ Aspeed2600SoCState *a = ASPEED2600_SOC(s);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
- return qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[dev]);
+ return qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[dev]);
}
static void aspeed_soc_ast2600_init(Object *obj)
{
+ Aspeed2600SoCState *a = ASPEED2600_SOC(obj);
AspeedSoCState *s = ASPEED_SOC(obj);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
int i;
@@ -155,7 +157,7 @@ static void aspeed_soc_ast2600_init(Object *obj)
}
for (i = 0; i < sc->num_cpus; i++) {
- object_initialize_child(obj, "cpu[*]", &s->cpu[i], sc->cpu_type);
+ object_initialize_child(obj, "cpu[*]", &a->cpu[i], sc->cpu_type);
}
snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname);
@@ -169,7 +171,7 @@ static void aspeed_soc_ast2600_init(Object *obj)
object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu),
"hw-prot-key");
- object_initialize_child(obj, "a7mpcore", &s->a7mpcore,
+ object_initialize_child(obj, "a7mpcore", &a->a7mpcore,
TYPE_A15MPCORE_PRIV);
object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC);
@@ -277,6 +279,7 @@ static uint64_t aspeed_calc_affinity(int cpu)
static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
{
int i;
+ Aspeed2600SoCState *a = ASPEED2600_SOC(dev);
AspeedSoCState *s = ASPEED_SOC(dev);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
Error *err = NULL;
@@ -306,39 +309,39 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
/* CPU */
for (i = 0; i < sc->num_cpus; i++) {
if (sc->num_cpus > 1) {
- object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar",
+ object_property_set_int(OBJECT(&a->cpu[i]), "reset-cbar",
ASPEED_A7MPCORE_ADDR, &error_abort);
}
- object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity",
+ object_property_set_int(OBJECT(&a->cpu[i]), "mp-affinity",
aspeed_calc_affinity(i), &error_abort);
- object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000,
+ object_property_set_int(OBJECT(&a->cpu[i]), "cntfrq", 1125000000,
&error_abort);
- object_property_set_bool(OBJECT(&s->cpu[i]), "neon", false,
+ object_property_set_bool(OBJECT(&a->cpu[i]), "neon", false,
&error_abort);
- object_property_set_bool(OBJECT(&s->cpu[i]), "vfp-d32", false,
+ object_property_set_bool(OBJECT(&a->cpu[i]), "vfp-d32", false,
&error_abort);
- object_property_set_link(OBJECT(&s->cpu[i]), "memory",
+ object_property_set_link(OBJECT(&a->cpu[i]), "memory",
OBJECT(s->memory), &error_abort);
- if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) {
+ if (!qdev_realize(DEVICE(&a->cpu[i]), NULL, errp)) {
return;
}
}
/* A7MPCORE */
- object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", sc->num_cpus,
+ object_property_set_int(OBJECT(&a->a7mpcore), "num-cpu", sc->num_cpus,
&error_abort);
- object_property_set_int(OBJECT(&s->a7mpcore), "num-irq",
+ object_property_set_int(OBJECT(&a->a7mpcore), "num-irq",
ROUND_UP(AST2600_MAX_IRQ + GIC_INTERNAL, 32),
&error_abort);
- sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort);
- aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->a7mpcore), 0, ASPEED_A7MPCORE_ADDR);
+ sysbus_realize(SYS_BUS_DEVICE(&a->a7mpcore), &error_abort);
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->a7mpcore), 0, ASPEED_A7MPCORE_ADDR);
for (i = 0; i < sc->num_cpus; i++) {
- SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore);
- DeviceState *d = DEVICE(&s->cpu[i]);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(&a->a7mpcore);
+ DeviceState *d = DEVICE(&a->cpu[i]);
irq = qdev_get_gpio_in(d, ARM_CPU_IRQ);
sysbus_connect_irq(sbd, i, irq);
@@ -351,7 +354,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
}
/* SRAM */
- sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&s->cpu[0])->cpu_index);
+ sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index);
memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err);
if (err) {
error_propagate(errp, err);
@@ -413,7 +416,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
}
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]);
for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) {
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore),
sc->irqmap[ASPEED_DEV_I2C] + i);
/* The AST2600 I2C controller has one IRQ per bus. */
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq);
@@ -579,19 +582,19 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
* offset 0.
*/
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ qdev_get_gpio_in(DEVICE(&a->a7mpcore),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ qdev_get_gpio_in(DEVICE(&a->a7mpcore),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ qdev_get_gpio_in(DEVICE(&a->a7mpcore),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4,
- qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ qdev_get_gpio_in(DEVICE(&a->a7mpcore),
sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4));
/* HACE */
@@ -611,7 +614,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
}
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]);
for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) {
- irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore),
sc->irqmap[ASPEED_DEV_I3C] + i);
/* The AST2600 I3C controller has one IRQ per bus. */
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq);
@@ -646,18 +649,18 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
sc->get_irq = aspeed_soc_ast2600_get_irq;
}
-static const TypeInfo aspeed_soc_ast2600_type_info = {
- .name = "ast2600-a3",
- .parent = TYPE_ASPEED_SOC,
- .instance_size = sizeof(AspeedSoCState),
- .instance_init = aspeed_soc_ast2600_init,
- .class_init = aspeed_soc_ast2600_class_init,
- .class_size = sizeof(AspeedSoCClass),
+static const TypeInfo aspeed_soc_ast2600_types[] = {
+ {
+ .name = TYPE_ASPEED2600_SOC,
+ .parent = TYPE_ASPEED_SOC,
+ .instance_size = sizeof(Aspeed2600SoCState),
+ .abstract = true,
+ }, {
+ .name = "ast2600-a3",
+ .parent = TYPE_ASPEED2600_SOC,
+ .instance_init = aspeed_soc_ast2600_init,
+ .class_init = aspeed_soc_ast2600_class_init,
+ },
};
-static void aspeed_soc_register_types(void)
-{
- type_register_static(&aspeed_soc_ast2600_type_info);
-};
-
-type_init(aspeed_soc_register_types)
+DEFINE_TYPES(aspeed_soc_ast2600_types)
diff --git a/hw/arm/aspeed_soc_common.c b/hw/arm/aspeed_soc_common.c
new file mode 100644
index 0000000000..828f61093b
--- /dev/null
+++ b/hw/arm/aspeed_soc_common.c
@@ -0,0 +1,154 @@
+/*
+ * ASPEED SoC family
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ * Jeremy Kerr <jk@ozlabs.org>
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/misc/unimp.h"
+#include "hw/arm/aspeed_soc.h"
+#include "hw/char/serial.h"
+
+
+qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev)
+{
+ return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev);
+}
+
+bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp)
+{
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ SerialMM *smm;
+
+ for (int i = 0, uart = ASPEED_DEV_UART1; i < sc->uarts_num; i++, uart++) {
+ smm = &s->uart[i];
+
+ /* Chardev property is set by the machine. */
+ qdev_prop_set_uint8(DEVICE(smm), "regshift", 2);
+ qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400);
+ qdev_set_legacy_instance_id(DEVICE(smm), sc->memmap[uart], 2);
+ qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN);
+ if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) {
+ return false;
+ }
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, aspeed_soc_get_irq(s, uart));
+ aspeed_mmio_map(s, SYS_BUS_DEVICE(smm), 0, sc->memmap[uart]);
+ }
+
+ return true;
+}
+
+void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr)
+{
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ int i = dev - ASPEED_DEV_UART1;
+
+ g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num);
+ qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr);
+}
+
+/*
+ * SDMC should be realized first to get correct RAM size and max size
+ * values
+ */
+bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp)
+{
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ ram_addr_t ram_size, max_ram_size;
+
+ ram_size = object_property_get_uint(OBJECT(&s->sdmc), "ram-size",
+ &error_abort);
+ max_ram_size = object_property_get_uint(OBJECT(&s->sdmc), "max-ram-size",
+ &error_abort);
+
+ memory_region_init(&s->dram_container, OBJECT(s), "ram-container",
+ max_ram_size);
+ memory_region_add_subregion(&s->dram_container, 0, s->dram_mr);
+
+ /*
+ * Add a memory region beyond the RAM region to let firmwares scan
+ * the address space with load/store and guess how much RAM the
+ * SoC has.
+ */
+ if (ram_size < max_ram_size) {
+ DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE);
+
+ qdev_prop_set_string(dev, "name", "ram-empty");
+ qdev_prop_set_uint64(dev, "size", max_ram_size - ram_size);
+ if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp)) {
+ return false;
+ }
+
+ memory_region_add_subregion_overlap(&s->dram_container, ram_size,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0), -1000);
+ }
+
+ memory_region_add_subregion(s->memory,
+ sc->memmap[ASPEED_DEV_SDRAM], &s->dram_container);
+ return true;
+}
+
+void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr)
+{
+ memory_region_add_subregion(s->memory, addr,
+ sysbus_mmio_get_region(dev, n));
+}
+
+void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev,
+ const char *name, hwaddr addr, uint64_t size)
+{
+ qdev_prop_set_string(DEVICE(dev), "name", name);
+ qdev_prop_set_uint64(DEVICE(dev), "size", size);
+ sysbus_realize(dev, &error_abort);
+
+ memory_region_add_subregion_overlap(s->memory, addr,
+ sysbus_mmio_get_region(dev, 0), -1000);
+}
+
+static void aspeed_soc_realize(DeviceState *dev, Error **errp)
+{
+ AspeedSoCState *s = ASPEED_SOC(dev);
+
+ if (!s->memory) {
+ error_setg(errp, "'memory' link is not set");
+ return;
+ }
+}
+
+static Property aspeed_soc_properties[] = {
+ DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+ DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_soc_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = aspeed_soc_realize;
+ device_class_set_props(dc, aspeed_soc_properties);
+}
+
+static const TypeInfo aspeed_soc_types[] = {
+ {
+ .name = TYPE_ASPEED_SOC,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(AspeedSoCState),
+ .class_size = sizeof(AspeedSoCClass),
+ .class_init = aspeed_soc_class_init,
+ .abstract = true,
+ },
+};
+
+DEFINE_TYPES(aspeed_soc_types)
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 24fa169060..84ea6a807a 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -722,84 +722,35 @@ static void do_cpu_reset(void *opaque)
cpu_set_pc(cs, entry);
} else {
- /* If we are booting Linux then we need to check whether we are
- * booting into secure or non-secure state and adjust the state
- * accordingly. Out of reset, ARM is defined to be in secure state
- * (SCR.NS = 0), we change that here if non-secure boot has been
- * requested.
+ /*
+ * If we are booting Linux then we might need to do so at:
+ * - AArch64 NS EL2 or NS EL1
+ * - AArch32 Secure SVC (EL3)
+ * - AArch32 NS Hyp (EL2)
+ * - AArch32 NS SVC (EL1)
+ * Configure the CPU in the way boot firmware would do to
+ * drop us down to the appropriate level.
*/
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- /* AArch64 is defined to come out of reset into EL3 if enabled.
- * If we are booting Linux then we need to adjust our EL as
- * Linux expects us to be in EL2 or EL1. AArch32 resets into
- * SVC, which Linux expects, so no privilege/exception level to
- * adjust.
- */
- if (env->aarch64) {
- env->cp15.scr_el3 |= SCR_RW;
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- env->cp15.hcr_el2 |= HCR_RW;
- env->pstate = PSTATE_MODE_EL2h;
- } else {
- env->pstate = PSTATE_MODE_EL1h;
- }
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- env->cp15.scr_el3 |= SCR_API | SCR_APK;
- }
- if (cpu_isar_feature(aa64_mte, cpu)) {
- env->cp15.scr_el3 |= SCR_ATA;
- }
- if (cpu_isar_feature(aa64_sve, cpu)) {
- env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
- env->vfp.zcr_el[3] = 0xf;
- }
- if (cpu_isar_feature(aa64_sme, cpu)) {
- env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
- env->cp15.scr_el3 |= SCR_ENTP2;
- env->vfp.smcr_el[3] = 0xf;
- }
- if (cpu_isar_feature(aa64_hcx, cpu)) {
- env->cp15.scr_el3 |= SCR_HXEN;
- }
- if (cpu_isar_feature(aa64_fgt, cpu)) {
- env->cp15.scr_el3 |= SCR_FGTEN;
- }
+ int target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
- /* AArch64 kernels never boot in secure mode */
- assert(!info->secure_boot);
- /* This hook is only supported for AArch32 currently:
- * bootloader_aarch64[] will not call the hook, and
- * the code above has already dropped us into EL2 or EL1.
- */
- assert(!info->secure_board_setup);
- }
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- /* If we have EL2 then Linux expects the HVC insn to work */
- env->cp15.scr_el3 |= SCR_HCE;
- }
-
- /* Set to non-secure if not a secure boot */
- if (!info->secure_boot &&
- (cs != first_cpu || !info->secure_board_setup)) {
- /* Linux expects non-secure state */
- env->cp15.scr_el3 |= SCR_NS;
- /* Set NSACR.{CP11,CP10} so NS can access the FPU */
- env->cp15.nsacr |= 3 << 10;
- }
- }
-
- if (!env->aarch64 && !info->secure_boot &&
- arm_feature(env, ARM_FEATURE_EL2)) {
+ if (env->aarch64) {
/*
- * This is an AArch32 boot not to Secure state, and
- * we have Hyp mode available, so boot the kernel into
- * Hyp mode. This is not how the CPU comes out of reset,
- * so we need to manually put it there.
+ * AArch64 kernels never boot in secure mode, and we don't
+ * support the secure_board_setup hook for AArch64.
*/
- cpsr_write(env, ARM_CPU_MODE_HYP, CPSR_M, CPSRWriteRaw);
+ assert(!info->secure_boot);
+ assert(!info->secure_board_setup);
+ } else {
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ (info->secure_boot ||
+ (info->secure_board_setup && cs == first_cpu))) {
+ /* Start this CPU in Secure SVC */
+ target_el = 3;
+ }
}
+ arm_emulate_firmware_reset(cs, target_el);
+
if (cs == first_cpu) {
AddressSpace *as = arm_boot_address_space(cpu, info);
diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c
index f2ff6c1abf..c9964bd283 100644
--- a/hw/arm/fby35.c
+++ b/hw/arm/fby35.c
@@ -27,8 +27,8 @@ struct Fby35State {
MemoryRegion bic_memory;
Clock *bic_sysclk;
- AspeedSoCState bmc;
- AspeedSoCState bic;
+ Aspeed2600SoCState bmc;
+ Aspeed10x0SoCState bic;
bool mmio_exec;
};
@@ -70,7 +70,10 @@ static void fby35_bmc_write_boot_rom(DriveInfo *dinfo, MemoryRegion *mr,
static void fby35_bmc_init(Fby35State *s)
{
+ AspeedSoCState *soc;
+
object_initialize_child(OBJECT(s), "bmc", &s->bmc, "ast2600-a3");
+ soc = ASPEED_SOC(&s->bmc);
memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory",
UINT64_MAX);
@@ -87,22 +90,21 @@ static void fby35_bmc_init(Fby35State *s)
&error_abort);
object_property_set_int(OBJECT(&s->bmc), "hw-strap2", 0x00000003,
&error_abort);
- aspeed_soc_uart_set_chr(&s->bmc, ASPEED_DEV_UART5, serial_hd(0));
+ aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART5, serial_hd(0));
qdev_realize(DEVICE(&s->bmc), NULL, &error_abort);
- aspeed_board_init_flashes(&s->bmc.fmc, "n25q00", 2, 0);
+ aspeed_board_init_flashes(&soc->fmc, "n25q00", 2, 0);
/* Install first FMC flash content as a boot rom. */
if (!s->mmio_exec) {
DriveInfo *mtd0 = drive_get(IF_MTD, 0, 0);
if (mtd0) {
- AspeedSoCState *bmc = &s->bmc;
- uint64_t rom_size = memory_region_size(&bmc->spi_boot);
+ uint64_t rom_size = memory_region_size(&soc->spi_boot);
memory_region_init_rom(&s->bmc_boot_rom, NULL, "aspeed.boot_rom",
rom_size, &error_abort);
- memory_region_add_subregion_overlap(&bmc->spi_boot_container, 0,
+ memory_region_add_subregion_overlap(&soc->spi_boot_container, 0,
&s->bmc_boot_rom, 1);
fby35_bmc_write_boot_rom(mtd0, &s->bmc_boot_rom,
@@ -114,10 +116,13 @@ static void fby35_bmc_init(Fby35State *s)
static void fby35_bic_init(Fby35State *s)
{
+ AspeedSoCState *soc;
+
s->bic_sysclk = clock_new(OBJECT(s), "SYSCLK");
clock_set_hz(s->bic_sysclk, 200000000ULL);
object_initialize_child(OBJECT(s), "bic", &s->bic, "ast1030-a1");
+ soc = ASPEED_SOC(&s->bic);
memory_region_init(&s->bic_memory, OBJECT(&s->bic), "bic-memory",
UINT64_MAX);
@@ -125,12 +130,12 @@ static void fby35_bic_init(Fby35State *s)
qdev_connect_clock_in(DEVICE(&s->bic), "sysclk", s->bic_sysclk);
object_property_set_link(OBJECT(&s->bic), "memory", OBJECT(&s->bic_memory),
&error_abort);
- aspeed_soc_uart_set_chr(&s->bic, ASPEED_DEV_UART5, serial_hd(1));
+ aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART5, serial_hd(1));
qdev_realize(DEVICE(&s->bic), NULL, &error_abort);
- aspeed_board_init_flashes(&s->bic.fmc, "sst25vf032b", 2, 2);
- aspeed_board_init_flashes(&s->bic.spi[0], "sst25vf032b", 2, 4);
- aspeed_board_init_flashes(&s->bic.spi[1], "sst25vf032b", 2, 6);
+ aspeed_board_init_flashes(&soc->fmc, "sst25vf032b", 2, 2);
+ aspeed_board_init_flashes(&soc->spi[0], "sst25vf032b", 2, 4);
+ aspeed_board_init_flashes(&soc->spi[1], "sst25vf032b", 2, 6);
}
static void fby35_init(MachineState *machine)
diff --git a/hw/arm/meson.build b/hw/arm/meson.build
index a6feaf1af9..68245d3ad1 100644
--- a/hw/arm/meson.build
+++ b/hw/arm/meson.build
@@ -48,8 +48,9 @@ arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c'
arm_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c'))
arm_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c'))
arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files(
- 'aspeed_soc.c',
'aspeed.c',
+ 'aspeed_soc_common.c',
+ 'aspeed_ast2400.c',
'aspeed_ast2600.c',
'aspeed_ast10x0.c',
'aspeed_eeprom.c',
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index 3c7dfcd6dc..e8a82618f0 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -2,6 +2,7 @@
* ARM SBSA Reference Platform emulation
*
* Copyright (c) 2018 Linaro Limited
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Written by Hongbo Zhang <hongbo.zhang@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
@@ -30,6 +31,7 @@
#include "exec/hwaddr.h"
#include "kvm_arm.h"
#include "hw/arm/boot.h"
+#include "hw/arm/bsa.h"
#include "hw/arm/fdt.h"
#include "hw/arm/smmuv3.h"
#include "hw/block/flash.h"
@@ -55,14 +57,6 @@
#define NUM_SMMU_IRQS 4
#define NUM_SATA_PORTS 6
-#define VIRTUAL_PMU_IRQ 7
-#define ARCH_GIC_MAINT_IRQ 9
-#define ARCH_TIMER_VIRT_IRQ 11
-#define ARCH_TIMER_S_EL1_IRQ 13
-#define ARCH_TIMER_NS_EL1_IRQ 14
-#define ARCH_TIMER_NS_EL2_IRQ 10
-#define ARCH_TIMER_NS_EL2_VIRT_IRQ 12
-
enum {
SBSA_FLASH,
SBSA_MEM,
@@ -479,7 +473,7 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
*/
for (i = 0; i < smp_cpus; i++) {
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
- int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
+ int intidbase = NUM_IRQS + i * GIC_INTERNAL;
int irq;
/*
* Mapping from the output timer irq lines from the CPU to the
@@ -496,14 +490,17 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
qdev_connect_gpio_out(cpudev, irq,
qdev_get_gpio_in(sms->gic,
- ppibase + timer_irq[irq]));
+ intidbase + timer_irq[irq]));
}
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
- qdev_get_gpio_in(sms->gic, ppibase
+ qdev_get_gpio_in(sms->gic,
+ intidbase
+ ARCH_GIC_MAINT_IRQ));
+
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
- qdev_get_gpio_in(sms->gic, ppibase
+ qdev_get_gpio_in(sms->gic,
+ intidbase
+ VIRTUAL_PMU_IRQ));
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index 648c2e37a2..6076025ad6 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -38,33 +38,71 @@ REG32(IDR0, 0x0)
FIELD(IDR0, S1P, 1 , 1)
FIELD(IDR0, TTF, 2 , 2)
FIELD(IDR0, COHACC, 4 , 1)
+ FIELD(IDR0, BTM, 5 , 1)
+ FIELD(IDR0, HTTU, 6 , 2)
+ FIELD(IDR0, DORMHINT, 8 , 1)
+ FIELD(IDR0, HYP, 9 , 1)
+ FIELD(IDR0, ATS, 10, 1)
+ FIELD(IDR0, NS1ATS, 11, 1)
FIELD(IDR0, ASID16, 12, 1)
+ FIELD(IDR0, MSI, 13, 1)
+ FIELD(IDR0, SEV, 14, 1)
+ FIELD(IDR0, ATOS, 15, 1)
+ FIELD(IDR0, PRI, 16, 1)
+ FIELD(IDR0, VMW, 17, 1)
FIELD(IDR0, VMID16, 18, 1)
+ FIELD(IDR0, CD2L, 19, 1)
+ FIELD(IDR0, VATOS, 20, 1)
FIELD(IDR0, TTENDIAN, 21, 2)
+ FIELD(IDR0, ATSRECERR, 23, 1)
FIELD(IDR0, STALL_MODEL, 24, 2)
FIELD(IDR0, TERM_MODEL, 26, 1)
FIELD(IDR0, STLEVEL, 27, 2)
+ FIELD(IDR0, RME_IMPL, 30, 1)
REG32(IDR1, 0x4)
FIELD(IDR1, SIDSIZE, 0 , 6)
+ FIELD(IDR1, SSIDSIZE, 6 , 5)
+ FIELD(IDR1, PRIQS, 11, 5)
FIELD(IDR1, EVENTQS, 16, 5)
FIELD(IDR1, CMDQS, 21, 5)
+ FIELD(IDR1, ATTR_PERMS_OVR, 26, 1)
+ FIELD(IDR1, ATTR_TYPES_OVR, 27, 1)
+ FIELD(IDR1, REL, 28, 1)
+ FIELD(IDR1, QUEUES_PRESET, 29, 1)
+ FIELD(IDR1, TABLES_PRESET, 30, 1)
+ FIELD(IDR1, ECMDQ, 31, 1)
#define SMMU_IDR1_SIDSIZE 16
#define SMMU_CMDQS 19
#define SMMU_EVENTQS 19
REG32(IDR2, 0x8)
+ FIELD(IDR2, BA_VATOS, 0, 10)
+
REG32(IDR3, 0xc)
FIELD(IDR3, HAD, 2, 1);
+ FIELD(IDR3, PBHA, 3, 1);
+ FIELD(IDR3, XNX, 4, 1);
+ FIELD(IDR3, PPS, 5, 1);
+ FIELD(IDR3, MPAM, 7, 1);
+ FIELD(IDR3, FWB, 8, 1);
+ FIELD(IDR3, STT, 9, 1);
FIELD(IDR3, RIL, 10, 1);
FIELD(IDR3, BBML, 11, 2);
+ FIELD(IDR3, E0PD, 13, 1);
+ FIELD(IDR3, PTWNNC, 14, 1);
+ FIELD(IDR3, DPT, 15, 1);
+
REG32(IDR4, 0x10)
+
REG32(IDR5, 0x14)
FIELD(IDR5, OAS, 0, 3);
FIELD(IDR5, GRAN4K, 4, 1);
FIELD(IDR5, GRAN16K, 5, 1);
FIELD(IDR5, GRAN64K, 6, 1);
+ FIELD(IDR5, VAX, 10, 2);
+ FIELD(IDR5, STALL_MAX, 16, 16);
#define SMMU_IDR5_OAS 4
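
The REG32/FIELD macros above declare each ID-register field by start bit and
width; reads and writes then go through generic extract/deposit helpers. A
minimal standalone re-implementation of those helpers (illustration only,
not the QEMU definitions), exercised on fields from the IDR0 layout above:

    #include <assert.h>
    #include <stdint.h>

    /* Works for field widths below 32, which covers the fields above. */
    static uint32_t extract32(uint32_t v, int start, int len)
    {
        return (v >> start) & ((1u << len) - 1);
    }

    static uint32_t deposit32(uint32_t v, int start, int len, uint32_t f)
    {
        uint32_t mask = ((1u << len) - 1) << start;
        return (v & ~mask) | ((f << start) & mask);
    }

    int main(void)
    {
        uint32_t idr0 = 0;

        idr0 = deposit32(idr0, 21, 2, 2); /* IDR0.TTENDIAN, bits [22:21] */
        idr0 = deposit32(idr0, 4, 1, 1);  /* IDR0.COHACC, bit 4 */
        assert(extract32(idr0, 21, 2) == 2);
        assert(extract32(idr0, 4, 1) == 1);
        return 0;
    }
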
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 6f2b2bd45f..c3871ae067 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -278,15 +278,19 @@ static void smmuv3_init_regs(SMMUv3State *s)
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);
- s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
+ if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
+ /* XNX is a stage-2-specific feature */
+ s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
+ }
+ s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);
+ s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
/* 4K, 16K and 64K granule support */
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
- s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
s->cmdq.prod = 0;
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 6b674231c2..9ce136cd88 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -601,21 +601,21 @@ build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
* The interrupt values are the same as the device tree values plus 16
*/
/* Secure EL1 timer GSIV */
- build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ + 16, 4);
+ build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ, 4);
/* Secure EL1 timer Flags */
build_append_int_noprefix(table_data, irqflags, 4);
/* Non-Secure EL1 timer GSIV */
- build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ + 16, 4);
+ build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ, 4);
/* Non-Secure EL1 timer Flags */
build_append_int_noprefix(table_data, irqflags |
1UL << 2, /* Always-on Capability */
4);
/* Virtual timer GSIV */
- build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ + 16, 4);
+ build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ, 4);
/* Virtual Timer Flags */
build_append_int_noprefix(table_data, irqflags, 4);
/* Non-Secure EL2 timer GSIV */
- build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ + 16, 4);
+ build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ, 4);
/* Non-Secure EL2 timer Flags */
build_append_int_noprefix(table_data, irqflags, 4);
/* CntReadBase Physical address */
@@ -729,9 +729,9 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
for (i = 0; i < MACHINE(vms)->smp.cpus; i++) {
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));
uint64_t physical_base_address = 0, gich = 0, gicv = 0;
- uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0;
+ uint32_t vgic_interrupt = vms->virt ? ARCH_GIC_MAINT_IRQ : 0;
uint32_t pmu_interrupt = arm_feature(&armcpu->env, ARM_FEATURE_PMU) ?
- PPI(VIRTUAL_PMU_IRQ) : 0;
+ VIRTUAL_PMU_IRQ : 0;
if (vms->gic_version == VIRT_GIC_VERSION_2) {
physical_base_address = memmap[VIRT_GIC_CPU].base;
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 15e74249f9..529f1c089c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -366,10 +366,14 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
}
qemu_fdt_setprop(ms->fdt, "/timer", "always-on", NULL, 0);
qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts",
- GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags,
- GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags,
- GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags,
- GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
+ GIC_FDT_IRQ_TYPE_PPI,
+ INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), irqflags,
+ GIC_FDT_IRQ_TYPE_PPI,
+ INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), irqflags,
+ GIC_FDT_IRQ_TYPE_PPI,
+ INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), irqflags,
+ GIC_FDT_IRQ_TYPE_PPI,
+ INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), irqflags);
}
static void fdt_add_cpu_nodes(const VirtMachineState *vms)
@@ -647,13 +651,12 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms)
dev = qdev_new(TYPE_ACPI_GED);
qdev_prop_set_uint32(dev, "ged-event", event);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
-
return dev;
}
@@ -691,10 +694,10 @@ static void create_v2m(VirtMachineState *vms)
DeviceState *dev;
dev = qdev_new("arm-gicv2m");
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_V2M].base);
qdev_prop_set_uint32(dev, "base-spi", irq);
qdev_prop_set_uint32(dev, "num-spi", NUM_GICV2M_SPIS);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_V2M].base);
for (i = 0; i < NUM_GICV2M_SPIS; i++) {
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
@@ -800,7 +803,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
*/
for (i = 0; i < smp_cpus; i++) {
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
- int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS;
+ int intidbase = NUM_IRQS + i * GIC_INTERNAL;
/* Mapping from the output timer irq lines from the CPU to the
* GIC PPI inputs we use for the virt board.
*/
@@ -814,22 +817,22 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
qdev_connect_gpio_out(cpudev, irq,
qdev_get_gpio_in(vms->gic,
- ppibase + timer_irq[irq]));
+ intidbase + timer_irq[irq]));
}
if (vms->gic_version != VIRT_GIC_VERSION_2) {
qemu_irq irq = qdev_get_gpio_in(vms->gic,
- ppibase + ARCH_GIC_MAINT_IRQ);
+ intidbase + ARCH_GIC_MAINT_IRQ);
qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
0, irq);
} else if (vms->virt) {
qemu_irq irq = qdev_get_gpio_in(vms->gic,
- ppibase + ARCH_GIC_MAINT_IRQ);
+ intidbase + ARCH_GIC_MAINT_IRQ);
sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq);
}
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
- qdev_get_gpio_in(vms->gic, ppibase
+ qdev_get_gpio_in(vms->gic, intidbase
+ VIRTUAL_PMU_IRQ));
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
@@ -1989,7 +1992,7 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
if (pmu) {
assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU));
if (kvm_irqchip_in_kernel()) {
- kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ));
+ kvm_arm_pmu_set_irq(cpu, VIRTUAL_PMU_IRQ);
}
kvm_arm_pmu_init(cpu);
}
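
The device tree keeps zero-based PPI numbers while the shared macros now
hold full INTIDs, hence the new INTID_TO_PPI() conversions in
fdt_add_timer_nodes(). A small sketch of the two directions, assuming the
new header defines INTID_TO_PPI(irq) as ((irq) - 16), consistent with the
"+ 16" additions dropped from the GTDT code above:

    #include <assert.h>

    #define INTID_TO_PPI(irq) ((irq) - 16) /* assumed bsa.h definition */
    #define PPI(irq)          ((irq) + 16) /* the old direction */

    int main(void)
    {
        int arch_timer_virt_irq = 27;      /* INTID; PPI number 11 */

        assert(INTID_TO_PPI(arch_timer_virt_irq) == 11);
        assert(PPI(11) == arch_timer_virt_irq);
        return 0;
    }
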
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index eecf3f7a81..818b833108 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -32,8 +32,6 @@
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
-#define REALIZE_CONNECTION_RETRIES 3
-
static const int user_feature_bits[] = {
VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_SEG_MAX,
@@ -393,7 +391,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
case CHR_EVENT_CLOSED:
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &s->chardev, &s->dev,
- vhost_user_blk_disconnect);
+ vhost_user_blk_disconnect, vhost_user_blk_event);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@@ -405,7 +403,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp)
{
- DeviceState *dev = &s->parent_obj.parent_obj;
+ DeviceState *dev = DEVICE(s);
int ret;
s->connected = false;
@@ -423,7 +421,7 @@ static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp)
assert(s->connected);
ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
- s->parent_obj.config_len, errp);
+ VIRTIO_DEVICE(s)->config_len, errp);
if (ret < 0) {
qemu_chr_fe_disconnect(&s->chardev);
vhost_dev_cleanup(&s->dev);
@@ -482,7 +480,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
s->inflight = g_new0(struct vhost_inflight, 1);
s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);
- retries = REALIZE_CONNECTION_RETRIES;
+ retries = VU_REALIZE_CONN_RETRIES;
assert(!*errp);
do {
if (*errp) {
diff --git a/hw/char/escc.c b/hw/char/escc.c
index 4be66053c1..48b30ee760 100644
--- a/hw/char/escc.c
+++ b/hw/char/escc.c
@@ -845,7 +845,7 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src,
put_queue(s, keycode);
}
-static QemuInputHandler sunkbd_handler = {
+static const QemuInputHandler sunkbd_handler = {
.name = "sun keyboard",
.mask = INPUT_EVENT_MASK_KEY,
.event = sunkbd_handle_event,
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index 5eaf2e79e6..d0d6a910f9 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -34,17 +34,17 @@ bool cpu_paging_enabled(const CPUState *cpu)
return false;
}
-void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->sysemu_ops->get_memory_mapping) {
- cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
- return;
+ return cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
}
error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
+ return false;
}
hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c
index c3e55ef9e9..9a4b59c6f2 100644
--- a/hw/core/machine-hmp-cmds.c
+++ b/hw/core/machine-hmp-cmds.c
@@ -71,6 +71,12 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
if (c->has_node_id) {
monitor_printf(mon, " node-id: \"%" PRIu64 "\"\n", c->node_id);
}
+ if (c->has_drawer_id) {
+ monitor_printf(mon, " drawer-id: \"%" PRIu64 "\"\n", c->drawer_id);
+ }
+ if (c->has_book_id) {
+ monitor_printf(mon, " book-id: \"%" PRIu64 "\"\n", c->book_id);
+ }
if (c->has_socket_id) {
monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id);
}
diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index 0f4d9b6f7a..25019c91ee 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -33,6 +33,14 @@ static char *cpu_hierarchy_to_string(MachineState *ms)
MachineClass *mc = MACHINE_GET_CLASS(ms);
GString *s = g_string_new(NULL);
+ if (mc->smp_props.drawers_supported) {
+ g_string_append_printf(s, "drawers (%u) * ", ms->smp.drawers);
+ }
+
+ if (mc->smp_props.books_supported) {
+ g_string_append_printf(s, "books (%u) * ", ms->smp.books);
+ }
+
g_string_append_printf(s, "sockets (%u)", ms->smp.sockets);
if (mc->smp_props.dies_supported) {
@@ -75,6 +83,8 @@ void machine_parse_smp_config(MachineState *ms,
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
unsigned cpus = config->has_cpus ? config->cpus : 0;
+ unsigned drawers = config->has_drawers ? config->drawers : 0;
+ unsigned books = config->has_books ? config->books : 0;
unsigned sockets = config->has_sockets ? config->sockets : 0;
unsigned dies = config->has_dies ? config->dies : 0;
unsigned clusters = config->has_clusters ? config->clusters : 0;
@@ -87,6 +97,8 @@ void machine_parse_smp_config(MachineState *ms,
* explicit configuration like "cpus=0" is not allowed.
*/
if ((config->has_cpus && config->cpus == 0) ||
+ (config->has_drawers && config->drawers == 0) ||
+ (config->has_books && config->books == 0) ||
(config->has_sockets && config->sockets == 0) ||
(config->has_dies && config->dies == 0) ||
(config->has_clusters && config->clusters == 0) ||
@@ -113,6 +125,19 @@ void machine_parse_smp_config(MachineState *ms,
dies = dies > 0 ? dies : 1;
clusters = clusters > 0 ? clusters : 1;
+ if (!mc->smp_props.books_supported && books > 1) {
+ error_setg(errp, "books not supported by this machine's CPU topology");
+ return;
+ }
+ books = books > 0 ? books : 1;
+
+ if (!mc->smp_props.drawers_supported && drawers > 1) {
+ error_setg(errp,
+ "drawers not supported by this machine's CPU topology");
+ return;
+ }
+ drawers = drawers > 0 ? drawers : 1;
+
/* compute missing values based on the provided ones */
if (cpus == 0 && maxcpus == 0) {
sockets = sockets > 0 ? sockets : 1;
@@ -126,33 +151,41 @@ void machine_parse_smp_config(MachineState *ms,
if (sockets == 0) {
cores = cores > 0 ? cores : 1;
threads = threads > 0 ? threads : 1;
- sockets = maxcpus / (dies * clusters * cores * threads);
+ sockets = maxcpus /
+ (drawers * books * dies * clusters * cores * threads);
} else if (cores == 0) {
threads = threads > 0 ? threads : 1;
- cores = maxcpus / (sockets * dies * clusters * threads);
+ cores = maxcpus /
+ (drawers * books * sockets * dies * clusters * threads);
}
} else {
/* prefer cores over sockets since 6.2 */
if (cores == 0) {
sockets = sockets > 0 ? sockets : 1;
threads = threads > 0 ? threads : 1;
- cores = maxcpus / (sockets * dies * clusters * threads);
+ cores = maxcpus /
+ (drawers * books * sockets * dies * clusters * threads);
} else if (sockets == 0) {
threads = threads > 0 ? threads : 1;
- sockets = maxcpus / (dies * clusters * cores * threads);
+ sockets = maxcpus /
+ (drawers * books * dies * clusters * cores * threads);
}
}
/* try to calculate omitted threads at last */
if (threads == 0) {
- threads = maxcpus / (sockets * dies * clusters * cores);
+ threads = maxcpus /
+ (drawers * books * sockets * dies * clusters * cores);
}
}
- maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * clusters * cores * threads;
+ maxcpus = maxcpus > 0 ? maxcpus : drawers * books * sockets * dies *
+ clusters * cores * threads;
cpus = cpus > 0 ? cpus : maxcpus;
ms->smp.cpus = cpus;
+ ms->smp.drawers = drawers;
+ ms->smp.books = books;
ms->smp.sockets = sockets;
ms->smp.dies = dies;
ms->smp.clusters = clusters;
@@ -163,7 +196,8 @@ void machine_parse_smp_config(MachineState *ms,
mc->smp_props.has_clusters = config->has_clusters;
/* sanity-check of the computed topology */
- if (sockets * dies * clusters * cores * threads != maxcpus) {
+ if (drawers * books * sockets * dies * clusters * cores * threads !=
+ maxcpus) {
g_autofree char *topo_msg = cpu_hierarchy_to_string(ms);
error_setg(errp, "Invalid CPU topology: "
"product of the hierarchy must match maxcpus: "
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 05aef2cf9f..50edaab737 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -863,6 +863,8 @@ static void machine_get_smp(Object *obj, Visitor *v, const char *name,
MachineState *ms = MACHINE(obj);
SMPConfiguration *config = &(SMPConfiguration){
.has_cpus = true, .cpus = ms->smp.cpus,
+ .has_drawers = true, .drawers = ms->smp.drawers,
+ .has_books = true, .books = ms->smp.books,
.has_sockets = true, .sockets = ms->smp.sockets,
.has_dies = true, .dies = ms->smp.dies,
.has_clusters = true, .clusters = ms->smp.clusters,
@@ -1137,6 +1139,8 @@ static void machine_initfn(Object *obj)
/* default to mc->default_cpus */
ms->smp.cpus = mc->default_cpus;
ms->smp.max_cpus = mc->default_cpus;
+ ms->smp.drawers = 1;
+ ms->smp.books = 1;
ms->smp.sockets = 1;
ms->smp.dies = 1;
ms->smp.clusters = 1;
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index 688340610e..7c6dfab128 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -1139,3 +1139,16 @@ const PropertyInfo qdev_prop_uuid = {
.set = set_uuid,
.set_default_value = set_default_uuid_auto,
};
+
+/* --- s390 cpu entitlement policy --- */
+
+QEMU_BUILD_BUG_ON(sizeof(CpuS390Entitlement) != sizeof(int));
+
+const PropertyInfo qdev_prop_cpus390entitlement = {
+ .name = "CpuS390Entitlement",
+ .description = "low/medium (default)/high",
+ .enum_table = &CpuS390Entitlement_lookup,
+ .get = qdev_propinfo_get_enum,
+ .set = qdev_propinfo_set_enum,
+ .set_default_value = qdev_propinfo_set_default_value_enum,
+};
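
The QEMU_BUILD_BUG_ON guards the generic enum accessors, which read and
write the property field through an int pointer. A standalone sketch of the
same compile-time check using C11 _Static_assert and a hypothetical
stand-in enum:

    /* Hypothetical stand-in for CpuS390Entitlement. */
    typedef enum {
        ENTITLEMENT_LOW,
        ENTITLEMENT_MEDIUM,
        ENTITLEMENT_HIGH,
    } Entitlement;

    /* Generic enum property accessors treat the field as an int, so the
     * enum must have exactly int's size. */
    _Static_assert(sizeof(Entitlement) == sizeof(int),
                   "enum must be int-sized for generic enum properties");

    int main(void) { return 0; }
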
diff --git a/hw/display/virtio-dmabuf.c b/hw/display/virtio-dmabuf.c
index 4a8e430f3d..3dba4577ca 100644
--- a/hw/display/virtio-dmabuf.c
+++ b/hw/display/virtio-dmabuf.c
@@ -29,7 +29,7 @@ static int uuid_equal_func(const void *lhv, const void *rhv)
static bool virtio_add_resource(QemuUUID *uuid, VirtioSharedObject *value)
{
- bool result = false;
+ bool result = true;
g_mutex_lock(&lock);
if (resource_uuids == NULL) {
@@ -39,7 +39,9 @@ static bool virtio_add_resource(QemuUUID *uuid, VirtioSharedObject *value)
g_free);
}
if (g_hash_table_lookup(resource_uuids, uuid) == NULL) {
- result = g_hash_table_insert(resource_uuids, uuid, value);
+ g_hash_table_insert(resource_uuids, uuid, value);
+ } else {
+ result = false;
}
g_mutex_unlock(&lock);
@@ -57,6 +59,9 @@ bool virtio_add_dmabuf(QemuUUID *uuid, int udmabuf_fd)
vso->type = TYPE_DMABUF;
vso->value = GINT_TO_POINTER(udmabuf_fd);
result = virtio_add_resource(uuid, vso);
+ if (!result) {
+ g_free(vso);
+ }
return result;
}
@@ -72,6 +77,9 @@ bool virtio_add_vhost_device(QemuUUID *uuid, struct vhost_dev *dev)
vso->type = TYPE_VHOST_DEV;
vso->value = dev;
result = virtio_add_resource(uuid, vso);
+ if (!result) {
+ g_free(vso);
+ }
return result;
}
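
virtio_add_resource() now reports duplicates, and because the hash table
never takes ownership of a rejected entry, both callers free their
VirtioSharedObject when the add fails. A standalone GLib sketch of that
insert-if-absent ownership contract (illustrative names, not the QEMU
functions):

    /* gcc sketch.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>

    static gboolean add_resource(GHashTable *table, char *key, char *value)
    {
        if (g_hash_table_lookup(table, key) != NULL) {
            return FALSE;      /* duplicate: caller keeps ownership */
        }
        g_hash_table_insert(table, key, value);
        return TRUE;           /* table now owns key and value */
    }

    int main(void)
    {
        GHashTable *t = g_hash_table_new_full(g_str_hash, g_str_equal,
                                              g_free, g_free);

        add_resource(t, g_strdup("uuid-1"), g_strdup("first"));

        char *key = g_strdup("uuid-1");
        char *val = g_strdup("second");
        if (!add_resource(t, key, val)) {
            g_free(key);       /* rejected: we still own both pieces */
            g_free(val);
        }
        g_hash_table_destroy(t);
        return 0;
    }
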
diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c
index 50c5373b65..37af256219 100644
--- a/hw/display/virtio-gpu-base.c
+++ b/hw/display/virtio-gpu-base.c
@@ -184,8 +184,7 @@ virtio_gpu_base_device_realize(DeviceState *qdev,
if (virtio_gpu_virgl_enabled(g->conf)) {
error_setg(&g->migration_blocker, "virgl is not yet migratable");
- if (migrate_add_blocker(g->migration_blocker, errp) < 0) {
- error_free(g->migration_blocker);
+ if (migrate_add_blocker(&g->migration_blocker, errp) < 0) {
return false;
}
}
@@ -253,10 +252,7 @@ virtio_gpu_base_device_unrealize(DeviceState *qdev)
{
VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
- if (g->migration_blocker) {
- migrate_del_blocker(g->migration_blocker);
- error_free(g->migration_blocker);
- }
+ migrate_del_blocker(&g->migration_blocker);
}
static void
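
migrate_add_blocker() and migrate_del_blocker() now take the address of the
caller's Error handle and free/clear it themselves, which is why the manual
error_free() calls disappear above. A standalone sketch of that
pointer-owning pattern (hypothetical types, not the migration API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { char *msg; } Blocker;

    static int add_blocker(Blocker **bp, int blocked)
    {
        if (blocked) {         /* cannot register: consume the handle */
            free((*bp)->msg);
            free(*bp);
            *bp = NULL;
            return -1;
        }
        return 0;              /* registered; handle stays with caller */
    }

    static void del_blocker(Blocker **bp)
    {
        if (*bp) {
            free((*bp)->msg);
            free(*bp);
            *bp = NULL;        /* idempotent, safe in unrealize paths */
        }
    }

    int main(void)
    {
        Blocker *b = malloc(sizeof(*b));
        b->msg = strdup("virgl is not yet migratable");

        if (add_blocker(&b, 0) == 0) {
            printf("blocker installed\n");
        }
        del_blocker(&b);       /* safe even if add already failed */
        return 0;
    }
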
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 6efd15b6ae..4265316cbb 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1128,7 +1128,7 @@ static void virtio_gpu_ctrl_bh(void *opaque)
VirtIOGPU *g = opaque;
VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
- vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
+ vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 0074a9b6f8..b2130a0d70 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -321,20 +321,20 @@ static void xenfb_mouse_sync(DeviceState *dev)
xenfb->wheel = 0;
}
-static QemuInputHandler xenfb_keyboard = {
+static const QemuInputHandler xenfb_keyboard = {
.name = "Xen PV Keyboard",
.mask = INPUT_EVENT_MASK_KEY,
.event = xenfb_key_event,
};
-static QemuInputHandler xenfb_abs_mouse = {
+static const QemuInputHandler xenfb_abs_mouse = {
.name = "Xen PV Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
.event = xenfb_mouse_event,
.sync = xenfb_mouse_sync,
};
-static QemuInputHandler xenfb_rel_mouse = {
+static const QemuInputHandler xenfb_rel_mouse = {
.name = "Xen PV Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
.event = xenfb_mouse_event,
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index 12c90267df..0ae056ed06 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -577,10 +577,6 @@ static void xilinx_axidma_init(Object *obj)
object_initialize_child(OBJECT(s), "axistream-control-connected-target",
&s->rx_control_dev,
TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
- object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
- (Object **)&s->dma_mr,
- qdev_prop_allow_set_link_before_realize,
- OBJ_PROP_LINK_STRONG);
sysbus_init_irq(sbd, &s->streams[0].irq);
sysbus_init_irq(sbd, &s->streams[1].irq);
@@ -596,6 +592,8 @@ static Property axidma_properties[] = {
tx_data_dev, TYPE_STREAM_SINK, StreamSink *),
DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIDMA,
tx_control_dev, TYPE_STREAM_SINK, StreamSink *),
+ DEFINE_PROP_LINK("dma", XilinxAXIDMA, dma_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
index 4eb7f66e9f..84c0083013 100644
--- a/hw/dma/xlnx-zdma.c
+++ b/hw/dma/xlnx-zdma.c
@@ -795,11 +795,6 @@ static void zdma_init(Object *obj)
TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);
-
- object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
- (Object **)&s->dma_mr,
- qdev_prop_allow_set_link_before_realize,
- OBJ_PROP_LINK_STRONG);
}
static const VMStateDescription vmstate_zdma = {
@@ -817,6 +812,8 @@ static const VMStateDescription vmstate_zdma = {
static Property zdma_props[] = {
DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
+ DEFINE_PROP_LINK("dma", XlnxZDMA, dma_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
index 88002698a1..e89089821a 100644
--- a/hw/dma/xlnx_csu_dma.c
+++ b/hw/dma/xlnx_csu_dma.c
@@ -702,6 +702,10 @@ static Property xlnx_csu_dma_properties[] = {
* which channel the device is connected to.
*/
DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true),
+ DEFINE_PROP_LINK("stream-connected-dma", XlnxCSUDMA, tx_dev,
+ TYPE_STREAM_SINK, StreamSink *),
+ DEFINE_PROP_LINK("dma", XlnxCSUDMA, dma_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
@@ -728,15 +732,6 @@ static void xlnx_csu_dma_init(Object *obj)
memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA,
XLNX_CSU_DMA_R_MAX * 4);
-
- object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SINK,
- (Object **)&s->tx_dev,
- qdev_prop_allow_set_link_before_realize,
- OBJ_PROP_LINK_STRONG);
- object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
- (Object **)&s->dma_mr,
- qdev_prop_allow_set_link_before_realize,
- OBJ_PROP_LINK_STRONG);
}
static const TypeInfo xlnx_csu_dma_info = {
diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig
index 5dd8b5b21e..ff8528aaa8 100644
--- a/hw/hppa/Kconfig
+++ b/hw/hppa/Kconfig
@@ -3,6 +3,7 @@ config HPPA_B160L
imply PCI_DEVICES
imply E1000_PCI
imply VIRTIO_VGA
+ select ASTRO
select DINO
select LASI
select SERIAL
diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h
index a5ac3dd0fd..a9be7bb851 100644
--- a/hw/hppa/hppa_hardware.h
+++ b/hw/hppa/hppa_hardware.h
@@ -18,7 +18,6 @@
#define LASI_UART_HPA 0xffd05000
#define LASI_SCSI_HPA 0xffd06000
#define LASI_LAN_HPA 0xffd07000
-#define LASI_RTC_HPA 0xffd09000
#define LASI_LPT_HPA 0xffd02000
#define LASI_AUDIO_HPA 0xffd04000
#define LASI_PS2KBD_HPA 0xffd08000
diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c
index cf28cb9586..67d4d1b5e0 100644
--- a/hw/hppa/machine.c
+++ b/hw/hppa/machine.c
@@ -1,6 +1,8 @@
/*
* QEMU HPPA hardware system emulator.
- * Copyright 2018 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2018-2023 Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
*/
#include "qemu/osdep.h"
@@ -20,7 +22,10 @@
#include "hw/input/lasips2.h"
#include "hw/net/lasi_82596.h"
#include "hw/nmi.h"
+#include "hw/usb.h"
#include "hw/pci/pci.h"
+#include "hw/pci/pci_device.h"
+#include "hw/pci-host/astro.h"
#include "hw/pci-host/dino.h"
#include "hw/misc/lasi.h"
#include "hppa_hardware.h"
@@ -29,12 +34,13 @@
#include "net/net.h"
#include "qemu/log.h"
-#define MIN_SEABIOS_HPPA_VERSION 6 /* require at least this fw version */
+#define MIN_SEABIOS_HPPA_VERSION 10 /* require at least this fw version */
#define HPA_POWER_BUTTON (FIRMWARE_END - 0x10)
#define enable_lasi_lan() 0
+static DeviceState *lasi_dev;
static void hppa_powerdown_req(Notifier *n, void *opaque)
{
@@ -95,14 +101,69 @@ static ISABus *hppa_isa_bus(void)
isa_bus = isa_bus_new(NULL, get_system_memory(), isa_region,
&error_abort);
- isa_irqs = i8259_init(isa_bus,
- /* qemu_allocate_irq(dino_set_isa_irq, s, 0)); */
- NULL);
+ isa_irqs = i8259_init(isa_bus, NULL);
isa_bus_register_input_irqs(isa_bus, isa_irqs);
return isa_bus;
}
+/*
+ * Helper functions to emulate RTC clock and DebugOutputPort
+ */
+static time_t rtc_ref;
+
+static uint64_t io_cpu_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t val = 0;
+
+ switch (addr) {
+ case 0: /* RTC clock */
+ val = time(NULL);
+ val += rtc_ref;
+ break;
+ case 8: /* DebugOutputPort */
+ return 0xe9; /* readback */
+ }
+ return val;
+}
+
+static void io_cpu_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ unsigned char ch;
+ Chardev *debugout;
+
+ switch (addr) {
+ case 0: /* RTC clock */
+ rtc_ref = val - time(NULL);
+ break;
+ case 8: /* DebugOutputPort */
+ ch = val;
+ debugout = serial_hd(0);
+ if (debugout) {
+ qemu_chr_fe_write_all(debugout->be, &ch, 1);
+ } else {
+ fprintf(stderr, "%c", ch);
+ }
+ break;
+ }
+}
+
+static const MemoryRegionOps hppa_io_helper_ops = {
+ .read = io_cpu_read,
+ .write = io_cpu_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+
static uint64_t cpu_hppa_to_phys(void *opaque, uint64_t addr)
{
addr &= (0x10000000 - 1);
@@ -118,11 +179,13 @@ static void fw_cfg_boot_set(void *opaque, const char *boot_device,
fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
}
-static FWCfgState *create_fw_cfg(MachineState *ms)
+static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus)
{
FWCfgState *fw_cfg;
uint64_t val;
const char qemu_version[] = QEMU_VERSION;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ int len;
fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4);
fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, ms->smp.cpus);
@@ -137,8 +200,24 @@ static FWCfgState *create_fw_cfg(MachineState *ms)
fw_cfg_add_file(fw_cfg, "/etc/cpu/tlb_entries",
g_memdup(&val, sizeof(val)), sizeof(val));
+ val = cpu_to_le64(HPPA_BTLB_ENTRIES);
+ fw_cfg_add_file(fw_cfg, "/etc/cpu/btlb_entries",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ len = strlen(mc->name) + 1;
+ fw_cfg_add_file(fw_cfg, "/etc/hppa/machine",
+ g_memdup(mc->name, len), len);
+
val = cpu_to_le64(HPA_POWER_BUTTON);
- fw_cfg_add_file(fw_cfg, "/etc/power-button-addr",
+ fw_cfg_add_file(fw_cfg, "/etc/hppa/power-button-addr",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ val = cpu_to_le64(CPU_HPA + 16);
+ fw_cfg_add_file(fw_cfg, "/etc/hppa/rtc-addr",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ val = cpu_to_le64(CPU_HPA + 24);
+ fw_cfg_add_file(fw_cfg, "/etc/hppa/DebugOutputPort",
g_memdup(&val, sizeof(val)), sizeof(val));
fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ms->boot_config.order[0]);
@@ -148,6 +227,8 @@ static FWCfgState *create_fw_cfg(MachineState *ms)
g_memdup(qemu_version, sizeof(qemu_version)),
sizeof(qemu_version));
+ fw_cfg_add_extra_pci_roots(pci_bus, fw_cfg);
+
return fw_cfg;
}
@@ -173,29 +254,20 @@ static DinoState *dino_init(MemoryRegion *addr_space)
return DINO_PCI_HOST_BRIDGE(dev);
}
-static void machine_hppa_init(MachineState *machine)
+/*
+ * Step 1: Create CPUs and Memory
+ */
+static void machine_HP_common_init_cpus(MachineState *machine)
{
- const char *kernel_filename = machine->kernel_filename;
- const char *kernel_cmdline = machine->kernel_cmdline;
- const char *initrd_filename = machine->initrd_filename;
- MachineClass *mc = MACHINE_GET_CLASS(machine);
- DeviceState *dev, *dino_dev, *lasi_dev;
- PCIBus *pci_bus;
- ISABus *isa_bus;
- char *firmware_filename;
- uint64_t firmware_low, firmware_high;
- long size;
- uint64_t kernel_entry = 0, kernel_low, kernel_high;
MemoryRegion *addr_space = get_system_memory();
- MemoryRegion *rom_region;
MemoryRegion *cpu_region;
long i;
unsigned int smp_cpus = machine->smp.cpus;
- SysBusDevice *s;
+ char *name;
/* Create CPUs. */
for (i = 0; i < smp_cpus; i++) {
- char *name = g_strdup_printf("cpu%ld-io-eir", i);
+ name = g_strdup_printf("cpu%ld-io-eir", i);
cpu[i] = HPPA_CPU(cpu_create(machine->cpu_type));
cpu_region = g_new(MemoryRegion, 1);
@@ -206,51 +278,40 @@ static void machine_hppa_init(MachineState *machine)
g_free(name);
}
+ /* RTC and DebugOutputPort on CPU #0 */
+ cpu_region = g_new(MemoryRegion, 1);
+ memory_region_init_io(cpu_region, OBJECT(cpu[0]), &hppa_io_helper_ops,
+ cpu[0], "cpu0-io-rtc", 2 * sizeof(uint64_t));
+ memory_region_add_subregion(addr_space, CPU_HPA + 16, cpu_region);
+
/* Main memory region. */
if (machine->ram_size > 3 * GiB) {
error_report("RAM size is currently restricted to 3GB");
exit(EXIT_FAILURE);
}
memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1);
+}
-
- /* Init Lasi chip */
- lasi_dev = DEVICE(lasi_init());
- memory_region_add_subregion(addr_space, LASI_HPA,
- sysbus_mmio_get_region(
- SYS_BUS_DEVICE(lasi_dev), 0));
-
- /* Init Dino (PCI host bus chip). */
- dino_dev = DEVICE(dino_init(addr_space));
- memory_region_add_subregion(addr_space, DINO_HPA,
- sysbus_mmio_get_region(
- SYS_BUS_DEVICE(dino_dev), 0));
- pci_bus = PCI_BUS(qdev_get_child_bus(dino_dev, "pci"));
- assert(pci_bus);
-
- /* Create ISA bus. */
- isa_bus = hppa_isa_bus();
- assert(isa_bus);
-
- /* Realtime clock, used by firmware for PDC_TOD call. */
- mc146818_rtc_init(isa_bus, 2000, NULL);
-
- /* Serial ports: Lasi and Dino use a 7.272727 MHz clock. */
- serial_mm_init(addr_space, LASI_UART_HPA + 0x800, 0,
- qdev_get_gpio_in(lasi_dev, LASI_IRQ_UART_HPA), 7272727 / 16,
- serial_hd(0), DEVICE_BIG_ENDIAN);
-
- serial_mm_init(addr_space, DINO_UART_HPA + 0x800, 0,
- qdev_get_gpio_in(dino_dev, DINO_IRQ_RS232INT), 7272727 / 16,
- serial_hd(1), DEVICE_BIG_ENDIAN);
-
- /* Parallel port */
- parallel_mm_init(addr_space, LASI_LPT_HPA + 0x800, 0,
- qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA),
- parallel_hds[0]);
-
- /* fw_cfg configuration interface */
- create_fw_cfg(machine);
+/*
+ * Last creation step: Add SCSI discs, NICs, graphics & load firmware
+ */
+static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus)
+{
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ DeviceState *dev;
+ PCIDevice *pci_dev;
+ char *firmware_filename;
+ uint64_t firmware_low, firmware_high;
+ long size;
+ uint64_t kernel_entry = 0, kernel_low, kernel_high;
+ MemoryRegion *addr_space = get_system_memory();
+ MemoryRegion *rom_region;
+ long i;
+ unsigned int smp_cpus = machine->smp.cpus;
+ SysBusDevice *s;
/* SCSI disk setup. */
dev = DEVICE(pci_create_simple(pci_bus, -1, "lsi53c895a"));
@@ -278,21 +339,42 @@ static void machine_hppa_init(MachineState *machine)
}
}
- /* PS/2 Keyboard/Mouse */
- dev = qdev_new(TYPE_LASIPS2);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
- sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
- qdev_get_gpio_in(lasi_dev, LASI_IRQ_PS2KBD_HPA));
- memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
- 0));
- memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA + 0x100,
- sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
- 1));
+ /* BMC board: HP Powerbar SP2 Diva (with console only) */
+ pci_dev = pci_new(-1, "pci-serial");
+ if (!lasi_dev) {
+ /* bind default keyboard/serial to Diva card */
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev", serial_hd(0));
+ }
+ qdev_prop_set_uint8(DEVICE(pci_dev), "prog_if", 0);
+ pci_realize_and_unref(pci_dev, pci_bus, &error_fatal);
+ pci_config_set_vendor_id(pci_dev->config, PCI_VENDOR_ID_HP);
+ pci_config_set_device_id(pci_dev->config, 0x1048);
+ pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_VENDOR_ID], PCI_VENDOR_ID_HP);
+ pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_ID], 0x1227); /* Powerbar */
+
+ /* create a second serial PCI card when running Astro */
+ if (!lasi_dev) {
+ pci_dev = pci_new(-1, "pci-serial-4x");
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev1", serial_hd(1));
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev2", serial_hd(2));
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev3", serial_hd(3));
+ qdev_prop_set_chr(DEVICE(pci_dev), "chardev4", serial_hd(4));
+ pci_realize_and_unref(pci_dev, pci_bus, &error_fatal);
+ }
+
+ /* create USB OHCI controller for USB keyboard & mouse on Astro machines */
+ if (!lasi_dev && machine->enable_graphics) {
+ pci_create_simple(pci_bus, -1, "pci-ohci");
+ usb_create_simple(usb_bus_find(-1), "usb-kbd");
+ usb_create_simple(usb_bus_find(-1), "usb-mouse");
+ }
/* register power switch emulation */
qemu_register_powerdown_notifier(&hppa_system_powerdown_notifier);
+ /* fw_cfg configuration interface */
+ create_fw_cfg(machine, pci_bus);
+
/* Load firmware. Given that this is not "real" firmware,
but one explicitly written for the emulation, we might as
well load it directly from an ELF image. */
@@ -410,6 +492,103 @@ static void machine_hppa_init(MachineState *machine)
cpu[0]->env.gr[19] = FW_CFG_IO_BASE;
}
+/*
+ * Create HP B160L workstation
+ */
+static void machine_HP_B160L_init(MachineState *machine)
+{
+ DeviceState *dev, *dino_dev;
+ MemoryRegion *addr_space = get_system_memory();
+ ISABus *isa_bus;
+ PCIBus *pci_bus;
+
+ /* Create CPUs and RAM. */
+ machine_HP_common_init_cpus(machine);
+
+ /* Init Lasi chip */
+ lasi_dev = DEVICE(lasi_init());
+ memory_region_add_subregion(addr_space, LASI_HPA,
+ sysbus_mmio_get_region(
+ SYS_BUS_DEVICE(lasi_dev), 0));
+
+ /* Init Dino (PCI host bus chip). */
+ dino_dev = DEVICE(dino_init(addr_space));
+ memory_region_add_subregion(addr_space, DINO_HPA,
+ sysbus_mmio_get_region(
+ SYS_BUS_DEVICE(dino_dev), 0));
+ pci_bus = PCI_BUS(qdev_get_child_bus(dino_dev, "pci"));
+ assert(pci_bus);
+
+ /* Create ISA bus, needed for PS/2 kbd/mouse port emulation */
+ isa_bus = hppa_isa_bus();
+ assert(isa_bus);
+
+ /* Serial ports: Lasi and Dino use a 7.272727 MHz clock. */
+ serial_mm_init(addr_space, LASI_UART_HPA + 0x800, 0,
+ qdev_get_gpio_in(lasi_dev, LASI_IRQ_UART_HPA), 7272727 / 16,
+ serial_hd(0), DEVICE_BIG_ENDIAN);
+
+ serial_mm_init(addr_space, DINO_UART_HPA + 0x800, 0,
+ qdev_get_gpio_in(dino_dev, DINO_IRQ_RS232INT), 7272727 / 16,
+ serial_hd(1), DEVICE_BIG_ENDIAN);
+
+ /* Parallel port */
+ parallel_mm_init(addr_space, LASI_LPT_HPA + 0x800, 0,
+ qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA),
+ parallel_hds[0]);
+
+ /* PS/2 Keyboard/Mouse */
+ dev = qdev_new(TYPE_LASIPS2);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
+ qdev_get_gpio_in(lasi_dev, LASI_IRQ_PS2KBD_HPA));
+ memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
+ 0));
+ memory_region_add_subregion(addr_space, LASI_PS2KBD_HPA + 0x100,
+ sysbus_mmio_get_region(SYS_BUS_DEVICE(dev),
+ 1));
+
+ /* Add SCSI discs, NICs, graphics & load firmware */
+ machine_HP_common_init_tail(machine, pci_bus);
+}
+
+static AstroState *astro_init(void)
+{
+ DeviceState *dev;
+
+ dev = qdev_new(TYPE_ASTRO_CHIP);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ return ASTRO_CHIP(dev);
+}
+
+/*
+ * Create HP C3700 workstation
+ */
+static void machine_HP_C3700_init(MachineState *machine)
+{
+ PCIBus *pci_bus;
+ AstroState *astro;
+ DeviceState *astro_dev;
+ MemoryRegion *addr_space = get_system_memory();
+
+ /* Create CPUs and RAM. */
+ machine_HP_common_init_cpus(machine);
+
+ /* Init Astro and the Elroys (PCI host bus chips). */
+ astro = astro_init();
+ astro_dev = DEVICE(astro);
+ memory_region_add_subregion(addr_space, ASTRO_HPA,
+ sysbus_mmio_get_region(
+ SYS_BUS_DEVICE(astro_dev), 0));
+ pci_bus = PCI_BUS(qdev_get_child_bus(DEVICE(astro->elroy[0]), "pci"));
+ assert(pci_bus);
+
+ /* Add SCSI discs, NICs, graphics & load firmware */
+ machine_HP_common_init_tail(machine, pci_bus);
+}
+
static void hppa_machine_reset(MachineState *ms, ShutdownCause reason)
{
unsigned int smp_cpus = ms->smp.cpus;
@@ -458,14 +637,14 @@ static void hppa_nmi(NMIState *n, int cpu_index, Error **errp)
}
}
-static void hppa_machine_init_class_init(ObjectClass *oc, void *data)
+static void HP_B160L_machine_init_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
NMIClass *nc = NMI_CLASS(oc);
- mc->desc = "HPPA B160L machine";
+ mc->desc = "HP B160L workstation";
mc->default_cpu_type = TYPE_HPPA_CPU;
- mc->init = machine_hppa_init;
+ mc->init = machine_HP_B160L_init;
mc->reset = hppa_machine_reset;
mc->block_default_type = IF_SCSI;
mc->max_cpus = HPPA_MAX_CPUS;
@@ -479,10 +658,41 @@ static void hppa_machine_init_class_init(ObjectClass *oc, void *data)
nc->nmi_monitor_handler = hppa_nmi;
}
-static const TypeInfo hppa_machine_init_typeinfo = {
- .name = MACHINE_TYPE_NAME("hppa"),
+static const TypeInfo HP_B160L_machine_init_typeinfo = {
+ .name = MACHINE_TYPE_NAME("B160L"),
+ .parent = TYPE_MACHINE,
+ .class_init = HP_B160L_machine_init_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_NMI },
+ { }
+ },
+};
+
+static void HP_C3700_machine_init_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ NMIClass *nc = NMI_CLASS(oc);
+
+ mc->desc = "HP C3700 workstation";
+ mc->default_cpu_type = TYPE_HPPA_CPU;
+ mc->init = machine_HP_C3700_init;
+ mc->reset = hppa_machine_reset;
+ mc->block_default_type = IF_SCSI;
+ mc->max_cpus = HPPA_MAX_CPUS;
+ mc->default_cpus = 1;
+ mc->is_default = false;
+ mc->default_ram_size = 1024 * MiB;
+ mc->default_boot_order = "cd";
+ mc->default_ram_id = "ram";
+ mc->default_nic = "tulip";
+
+ nc->nmi_monitor_handler = hppa_nmi;
+}
+
+static const TypeInfo HP_C3700_machine_init_typeinfo = {
+ .name = MACHINE_TYPE_NAME("C3700"),
.parent = TYPE_MACHINE,
- .class_init = hppa_machine_init_class_init,
+ .class_init = HP_C3700_machine_init_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_NMI },
{ }
@@ -491,7 +701,8 @@ static const TypeInfo hppa_machine_init_typeinfo = {
static void hppa_machine_init_register_types(void)
{
- type_register_static(&hppa_machine_init_typeinfo);
+ type_register_static(&HP_B160L_machine_init_typeinfo);
+ type_register_static(&HP_C3700_machine_init_typeinfo);
}
type_init(hppa_machine_init_register_types)
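
The RTC emulation above stores only the guest/host delta: a write records
rtc_ref = val - time(NULL), and a read returns time(NULL) + rtc_ref, so the
emulated clock keeps ticking with the host while honoring guest
adjustments. A standalone sketch of that scheme:

    #include <assert.h>
    #include <stdio.h>
    #include <time.h>

    static time_t rtc_ref;  /* guest-to-host offset, as in machine.c */

    static void rtc_write(time_t guest_val)
    {
        rtc_ref = guest_val - time(NULL);
    }

    static time_t rtc_read(void)
    {
        return time(NULL) + rtc_ref;
    }

    int main(void)
    {
        rtc_write(1000000000);       /* guest sets its clock */
        time_t t = rtc_read();
        assert(t >= 1000000000);     /* reflects the write, then keeps
                                        advancing with the host */
        printf("guest RTC: %ld\n", (long)t);
        return 0;
    }
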
diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig
index 9051083c1e..94772c726b 100644
--- a/hw/i386/Kconfig
+++ b/hw/i386/Kconfig
@@ -72,8 +72,7 @@ config I440FX
select PC_PCI
select PC_ACPI
select PCI_I440FX
- select PIIX3
- select IDE_PIIX
+ select PIIX
select DIMM
select SMBIOS
select FW_CFG_DMA
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 3f2b27cf75..80db183b78 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -56,7 +56,6 @@
/* Supported chipsets: */
#include "hw/southbridge/ich9.h"
-#include "hw/southbridge/piix.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/fw_cfg.h"
#include "hw/i386/pc.h"
@@ -242,10 +241,6 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
pm->pcihp_io_len =
object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
- /* The above need not be conditional on machine type because the reset port
- * happens to be the same on PIIX (pc) and ICH9 (q35). */
- QEMU_BUILD_BUG_ON(ICH9_RST_CNT_IOPORT != PIIX_RCR_IOPORT);
-
/* Fill in optional s3/s4 related properties */
o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
if (o) {
@@ -1422,6 +1417,7 @@ static void build_acpi0017(Aml *table)
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(0x01)));
aml_append(dev, method);
+ build_cxl_dsm_method(dev);
aml_append(scope, dev);
aml_append(table, scope);
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 8d0f2f99dd..7965415b47 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -1579,9 +1579,8 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
/* set up MMIO */
memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
AMDVI_MMIO_SIZE);
-
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
- sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
+ memory_region_add_subregion(get_system_memory(), AMDVI_BASE_ADDR,
+ &s->mmio);
pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
amdvi_init(s);
}
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 2c832ab68b..1c6c18622f 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -469,21 +469,12 @@ static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
- uint16_t source_id, hwaddr addr,
- VTDFaultReason fault, bool is_write,
- bool is_pasid, uint32_t pasid)
+ uint64_t hi, uint64_t lo)
{
- uint64_t hi = 0, lo;
hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
assert(index < DMAR_FRCD_REG_NR);
- lo = VTD_FRCD_FI(addr);
- hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault) |
- VTD_FRCD_PV(pasid) | VTD_FRCD_PP(is_pasid);
- if (!is_write) {
- hi |= VTD_FRCD_T;
- }
vtd_set_quad_raw(s, frcd_reg_addr, lo);
vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
@@ -509,17 +500,11 @@ static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
}
/* Log and report a DMAR (address translation) fault to software */
-static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
- hwaddr addr, VTDFaultReason fault,
- bool is_write, bool is_pasid,
- uint32_t pasid)
+static void vtd_report_frcd_fault(IntelIOMMUState *s, uint64_t source_id,
+ uint64_t hi, uint64_t lo)
{
uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
- assert(fault < VTD_FR_MAX);
-
- trace_vtd_dmar_fault(source_id, fault, addr, is_write);
-
if (fsts_reg & VTD_FSTS_PFO) {
error_report_once("New fault is not recorded due to "
"Primary Fault Overflow");
@@ -539,8 +524,7 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
return;
}
- vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault,
- is_write, is_pasid, pasid);
+ vtd_record_frcd(s, s->next_frcd_reg, hi, lo);
if (fsts_reg & VTD_FSTS_PPF) {
error_report_once("There are pending faults already, "
@@ -565,6 +549,40 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
}
}
+/* Log and report a DMAR (address translation) fault to software */
+static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
+ hwaddr addr, VTDFaultReason fault,
+ bool is_write, bool is_pasid,
+ uint32_t pasid)
+{
+ uint64_t hi, lo;
+
+ assert(fault < VTD_FR_MAX);
+
+ trace_vtd_dmar_fault(source_id, fault, addr, is_write);
+
+ lo = VTD_FRCD_FI(addr);
+ hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault) |
+ VTD_FRCD_PV(pasid) | VTD_FRCD_PP(is_pasid);
+ if (!is_write) {
+ hi |= VTD_FRCD_T;
+ }
+
+ vtd_report_frcd_fault(s, source_id, hi, lo);
+}
+
+
+static void vtd_report_ir_fault(IntelIOMMUState *s, uint64_t source_id,
+ VTDFaultReason fault, uint16_t index)
+{
+ uint64_t hi, lo;
+
+ lo = VTD_FRCD_IR_IDX(index);
+ hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
+
+ vtd_report_frcd_fault(s, source_id, hi, lo);
+}
+
/* Handle Invalidation Queue Errors, i.e. error conditions of the queued
* invalidation interface.
*/
@@ -3305,8 +3323,9 @@ static Property vtd_properties[] = {
};
/* Read IRTE entry with specific index */
-static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
- VTD_IR_TableEntry *entry, uint16_t sid)
+static bool vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
+ VTD_IR_TableEntry *entry, uint16_t sid,
+ bool do_fault)
{
static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
{0xffff, 0xfffb, 0xfff9, 0xfff8};
@@ -3317,7 +3336,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
if (index >= iommu->intr_size) {
error_report_once("%s: index too large: ind=0x%x",
__func__, index);
- return -VTD_FR_IR_INDEX_OVER;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_INDEX_OVER, index);
+ }
+ return false;
}
addr = iommu->intr_root + index * sizeof(*entry);
@@ -3325,7 +3347,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) {
error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
__func__, index, addr);
- return -VTD_FR_IR_ROOT_INVAL;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_ROOT_INVAL, index);
+ }
+ return false;
}
entry->data[0] = le64_to_cpu(entry->data[0]);
@@ -3333,11 +3358,24 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]);
+ /*
+ * The remaining potential fault conditions are "qualified" by the
+ * Fault Processing Disable bit in the IRTE, including the "not present"
+ * case. So just clear the do_fault flag if PFD is set, which will
+ * prevent faults being raised.
+ */
+ if (entry->irte.fault_disable) {
+ do_fault = false;
+ }
+
if (!entry->irte.present) {
error_report_once("%s: detected non-present IRTE "
"(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
__func__, index, entry->data[1], entry->data[0]);
- return -VTD_FR_IR_ENTRY_P;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_ENTRY_P, index);
+ }
+ return false;
}
if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
@@ -3345,7 +3383,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
error_report_once("%s: detected non-zero reserved IRTE "
"(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
__func__, index, entry->data[1], entry->data[0]);
- return -VTD_FR_IR_IRTE_RSVD;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_IRTE_RSVD, index);
+ }
+ return false;
}
if (sid != X86_IOMMU_SID_INVALID) {
@@ -3361,7 +3402,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
error_report_once("%s: invalid IRTE SID "
"(index=%u, sid=%u, source_id=%u)",
__func__, index, sid, source_id);
- return -VTD_FR_IR_SID_ERR;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
+ }
+ return false;
}
break;
@@ -3373,7 +3417,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
error_report_once("%s: invalid SVT_BUS "
"(index=%u, bus=%u, min=%u, max=%u)",
__func__, index, bus, bus_min, bus_max);
- return -VTD_FR_IR_SID_ERR;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
+ }
+ return false;
}
break;
@@ -3382,23 +3429,24 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
"(index=%u, type=%d)", __func__,
index, entry->irte.sid_vtype);
/* Take this as verification failure. */
- return -VTD_FR_IR_SID_ERR;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
+ }
+ return false;
}
}
- return 0;
+ return true;
}
/* Fetch IRQ information of specific IR index */
-static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
- X86IOMMUIrq *irq, uint16_t sid)
+static bool vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
+ X86IOMMUIrq *irq, uint16_t sid, bool do_fault)
{
VTD_IR_TableEntry irte = {};
- int ret = 0;
- ret = vtd_irte_get(iommu, index, &irte, sid);
- if (ret) {
- return ret;
+ if (!vtd_irte_get(iommu, index, &irte, sid, do_fault)) {
+ return false;
}
irq->trigger_mode = irte.irte.trigger_mode;
@@ -3417,16 +3465,15 @@ static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
irq->delivery_mode, irq->dest, irq->dest_mode);
- return 0;
+ return true;
}
/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
MSIMessage *origin,
MSIMessage *translated,
- uint16_t sid)
+ uint16_t sid, bool do_fault)
{
- int ret = 0;
VTD_IR_MSIAddress addr;
uint16_t index;
X86IOMMUIrq irq = {};
@@ -3443,14 +3490,20 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
if (origin->address & VTD_MSI_ADDR_HI_MASK) {
error_report_once("%s: MSI address high 32 bits non-zero detected: "
"address=0x%" PRIx64, __func__, origin->address);
- return -VTD_FR_IR_REQ_RSVD;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
+ }
+ return -EINVAL;
}
addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
if (addr.addr.__head != 0xfee) {
error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
__func__, addr.data);
- return -VTD_FR_IR_REQ_RSVD;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
+ }
+ return -EINVAL;
}
/* This is compatible mode. */
@@ -3469,9 +3522,8 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
}
- ret = vtd_remap_irq_get(iommu, index, &irq, sid);
- if (ret) {
- return ret;
+ if (!vtd_remap_irq_get(iommu, index, &irq, sid, do_fault)) {
+ return -EINVAL;
}
if (addr.addr.sub_valid) {
@@ -3481,7 +3533,10 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
"(sid=%u, address=0x%" PRIx64
", data=0x%" PRIx32 ")",
__func__, sid, origin->address, origin->data);
- return -VTD_FR_IR_REQ_RSVD;
+ if (do_fault) {
+ vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
+ }
+ return -EINVAL;
}
} else {
uint8_t vector = origin->data & 0xff;
@@ -3521,7 +3576,7 @@ static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
MSIMessage *dst, uint16_t sid)
{
return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
- src, dst, sid);
+ src, dst, sid, false);
}
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
@@ -3547,9 +3602,8 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
sid = attrs.requester_id;
}
- ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
+ ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid, true);
if (ret) {
- /* TODO: report error */
/* Drop this interrupt */
return MEMTX_ERROR;
}
@@ -4134,6 +4188,8 @@ static void vtd_realize(DeviceState *dev, Error **errp)
qemu_mutex_init(&s->iommu_lock);
memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
"intel_iommu", DMAR_REG_SIZE);
+ memory_region_add_subregion(get_system_memory(),
+ Q35_HOST_BRIDGE_IOMMU_ADDR, &s->csrmem);
/* Create the shared memory regions by all devices */
memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar",
@@ -4148,15 +4204,12 @@ static void vtd_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion_overlap(&s->mr_nodmar,
VTD_INTERRUPT_ADDR_FIRST,
&s->mr_ir, 1);
-
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
/* No corresponding destroy */
s->iotlb = g_hash_table_new_full(vtd_iotlb_hash, vtd_iotlb_equal,
g_free, g_free);
s->vtd_address_spaces = g_hash_table_new_full(vtd_as_hash, vtd_as_equal,
g_free, g_free);
vtd_init(s);
- sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
/* Pseudo address space under root PCI bus. */
x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index e1450c5cfe..f8cf99bddf 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -268,6 +268,7 @@
#define VTD_FRCD_FI(val) ((val) & ~0xfffULL)
#define VTD_FRCD_PV(val) (((val) & 0xffffULL) << 40)
#define VTD_FRCD_PP(val) (((val) & 0x1) << 31)
+#define VTD_FRCD_IR_IDX(val) (((val) & 0xffffULL) << 48)
/* DMA Remapping Fault Conditions */
typedef enum VTDFaultReason {
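
The new VTD_FRCD_IR_IDX macro lets interrupt-remapping faults reuse the
fault-recording path: for DMA faults the FRCD low quadword carries the
page-aligned fault address, while for IR faults it carries the IRTE index
in bits 63:48. A standalone sketch using the two macros from this header:

    #include <assert.h>
    #include <stdint.h>

    /* As defined in intel_iommu_internal.h above. */
    #define VTD_FRCD_FI(val)     ((val) & ~0xfffULL)
    #define VTD_FRCD_IR_IDX(val) (((val) & 0xffffULL) << 48)

    int main(void)
    {
        /* DMA fault: page-aligned fault address in lo */
        uint64_t lo_dma = VTD_FRCD_FI(0x12345678abcULL);
        assert(lo_dma == 0x12345678000ULL);

        /* IR fault: IRTE index in lo instead */
        uint64_t lo_ir = VTD_FRCD_IR_IDX(0x42);
        assert((lo_ir >> 48) == 0x42);
        return 0;
    }
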
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index f25977d3f6..e756b0aa43 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -333,10 +333,6 @@ void kvmclock_create(bool create_always)
X86CPU *cpu = X86_CPU(first_cpu);
assert(kvm_enabled());
- if (!kvm_has_adjust_clock()) {
- return;
- }
-
if (create_always ||
cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) |
(1ULL << KVM_FEATURE_CLOCKSOURCE2))) {
diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c
index a649b2b7ca..e49b9c4b56 100644
--- a/hw/i386/kvm/i8254.c
+++ b/hw/i386/kvm/i8254.c
@@ -97,24 +97,12 @@ static void kvm_pit_get(PITCommonState *pit)
return;
}
- if (kvm_has_pit_state2()) {
- ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit);
- if (ret < 0) {
- fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(-ret));
- abort();
- }
- pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY;
- } else {
- /*
- * kvm_pit_state2 is superset of kvm_pit_state struct,
- * so we can use it for KVM_GET_PIT as well.
- */
- ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT, &kpit);
- if (ret < 0) {
- fprintf(stderr, "KVM_GET_PIT failed: %s\n", strerror(-ret));
- abort();
- }
+ ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit);
+ if (ret < 0) {
+ fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(-ret));
+ abort();
}
+ pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY;
for (i = 0; i < 3; i++) {
kchan = &kpit.channels[i];
sc = &pit->channels[i];
@@ -170,12 +158,9 @@ static void kvm_pit_put(PITCommonState *pit)
kchan->count_load_time = sc->count_load_time - s->kernel_clock_offset;
}
- ret = kvm_vm_ioctl(kvm_state,
- kvm_has_pit_state2() ? KVM_SET_PIT2 : KVM_SET_PIT,
- &kpit);
+ ret = kvm_vm_ioctl(kvm_state, KVM_SET_PIT2, &kpit);
if (ret < 0) {
- fprintf(stderr, "%s failed: %s\n",
- kvm_has_pit_state2() ? "KVM_SET_PIT2" : "KVM_SET_PIT",
+ fprintf(stderr, "KVM_SET_PIT2 failed: %s\n",
strerror(-ret));
abort();
}
@@ -261,11 +246,12 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp)
};
int ret;
- if (kvm_check_extension(kvm_state, KVM_CAP_PIT2)) {
- ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config);
- } else {
- ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT);
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PIT_STATE2) ||
+ !kvm_check_extension(kvm_state, KVM_CAP_PIT2)) {
+ error_setg(errp, "In-kernel PIT not available");
}
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config);
if (ret < 0) {
error_setg(errp, "Create kernel PIC irqchip failed: %s",
strerror(-ret));
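
The in-kernel PIT now unconditionally requires KVM_CAP_PIT2 and
KVM_CAP_PIT_STATE2, both of which have been available in kernels for many
years. A standalone probe for the two capabilities using the stock
<linux/kvm.h> interface:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
            perror("/dev/kvm");
            return 1;
        }
        int pit2   = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PIT2);
        int state2 = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PIT_STATE2);
        printf("PIT2=%d PIT_STATE2=%d -> in-kernel PIT %s\n", pit2, state2,
               (pit2 > 0 && state2 > 0) ? "available" : "unavailable");
        return 0;
    }
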
diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c
index b9c93039e2..ca55aecc3b 100644
--- a/hw/i386/microvm.c
+++ b/hw/i386/microvm.c
@@ -206,12 +206,12 @@ static void microvm_devices_init(MicrovmMachineState *mms)
if (x86_machine_is_acpi_enabled(x86ms)) {
DeviceState *dev = qdev_new(TYPE_ACPI_GED);
qdev_prop_set_uint32(dev, "ged-event", ACPI_GED_PWR_DOWN_EVT);
+ sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, GED_MMIO_BASE);
/* sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, GED_MMIO_BASE_MEMHP); */
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, GED_MMIO_BASE_REGS);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
x86ms->gsi[GED_MMIO_IRQ]);
- sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);
x86ms->acpi_dev = HOTPLUG_HANDLER(dev);
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index bb3854d1d0..6031234a73 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -781,10 +781,12 @@ static void pc_get_device_memory_range(PCMachineState *pcms,
static uint64_t pc_get_cxl_range_start(PCMachineState *pcms)
{
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
+ MachineState *ms = MACHINE(pcms);
hwaddr cxl_base;
ram_addr_t size;
- if (pcmc->has_reserved_memory) {
+ if (pcmc->has_reserved_memory &&
+ (ms->ram_size < ms->maxram_size)) {
pc_get_device_memory_range(pcms, &cxl_base, &size);
cxl_base += size;
} else {
@@ -1199,7 +1201,6 @@ void pc_basic_device_init(struct PCMachineState *pcms,
DeviceState *hpet = NULL;
int pit_isa_irq = 0;
qemu_irq pit_alt_irq = NULL;
- qemu_irq rtc_irq = NULL;
ISADevice *pit = NULL;
MemoryRegion *ioport80_io = g_new(MemoryRegion, 1);
MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1);
@@ -1213,12 +1214,10 @@ void pc_basic_device_init(struct PCMachineState *pcms,
/*
* Check if an HPET shall be created.
- *
- * Without KVM_CAP_PIT_STATE2, we cannot switch off the in-kernel PIT
- * when the HPET wants to take over. Thus we have to disable the latter.
*/
- if (pcms->hpet_enabled && (!kvm_irqchip_in_kernel() ||
- kvm_has_pit_state2())) {
+ if (pcms->hpet_enabled) {
+ qemu_irq rtc_irq;
+
hpet = qdev_try_new(TYPE_HPET);
if (!hpet) {
error_report("couldn't create HPET device");
@@ -1243,16 +1242,11 @@ void pc_basic_device_init(struct PCMachineState *pcms,
pit_isa_irq = -1;
pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT);
rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT);
- }
- if (rtc_irq) {
+ /* overwrite connection created by south bridge */
qdev_connect_gpio_out(DEVICE(rtc_state), 0, rtc_irq);
- } else {
- uint32_t irq = object_property_get_uint(OBJECT(rtc_state),
- "irq",
- &error_fatal);
- isa_connect_gpio_out(rtc_state, 0, irq);
}
+
object_property_add_alias(OBJECT(pcms), "rtc-time", OBJECT(rtc_state),
"date");
@@ -1283,7 +1277,9 @@ void pc_basic_device_init(struct PCMachineState *pcms,
/* connect PIT to output control line of the HPET */
qdev_connect_gpio_out(hpet, 0, qdev_get_gpio_in(DEVICE(pit), 0));
}
- pcspk_init(pcms->pcspk, isa_bus, pit);
+ object_property_set_link(OBJECT(pcms->pcspk), "pit",
+ OBJECT(pit), &error_fatal);
+ isa_realize_and_unref(pcms->pcspk, isa_bus, &error_fatal);
}
/* Super I/O */
@@ -1710,6 +1706,7 @@ static void pc_machine_initfn(Object *obj)
#endif /* CONFIG_VMPORT */
pcms->max_ram_below_4g = 0; /* use default */
pcms->smbios_entry_point_type = pcmc->default_smbios_ep_type;
+ pcms->south_bridge = pcmc->default_south_bridge;
/* acpi build is enabled by default if machine supports it */
pcms->acpi_build_enabled = pcmc->has_acpi_build;
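[Editor's note] pc_basic_device_init() no longer goes through a pcspk_init()
helper; the PIT is handed to the PC speaker as a QOM link property and the
speaker is then realized like any other ISA device. A minimal sketch of that
sequence (both calls appear verbatim in the hunk; "pit" is a link property of
TYPE_PC_SPEAKER):

    /* Point the speaker's "pit" link at the PIT before realizing it... */
    object_property_set_link(OBJECT(pcms->pcspk), "pit",
                             OBJECT(pit), &error_fatal);
    /* ...then realize it on the ISA bus and drop the local reference. */
    isa_realize_and_unref(pcms->pcspk, isa_bus, &error_fatal);

The i82378 and MIPS Jazz hunks below perform the same conversion.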
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index e36a3262b2..26e161beb9 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -43,7 +43,6 @@
#include "net/net.h"
#include "hw/ide/isa.h"
#include "hw/ide/pci.h"
-#include "hw/ide/piix.h"
#include "hw/irq.h"
#include "sysemu/kvm.h"
#include "hw/i386/kvm/clock.h"
@@ -51,8 +50,6 @@
#include "hw/i2c/smbus_eeprom.h"
#include "exec/memory.h"
#include "hw/acpi/acpi.h"
-#include "hw/acpi/piix4.h"
-#include "hw/usb/hcd-uhci.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/xen.h"
@@ -117,7 +114,7 @@ static void pc_init1(MachineState *machine,
MemoryRegion *system_io = get_system_io();
PCIBus *pci_bus = NULL;
ISABus *isa_bus;
- int piix3_devfn = -1;
+ Object *piix4_pm = NULL;
qemu_irq smi_irq;
GSIState *gsi_state;
BusState *idebus[MAX_IDE_BUS];
@@ -261,10 +258,29 @@ static void pc_init1(MachineState *machine,
gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
if (pcmc->pci_enabled) {
- PIIX3State *piix3;
PCIDevice *pci_dev;
-
- pci_dev = pci_create_simple_multifunction(pci_bus, -1, TYPE_PIIX3_DEVICE);
+ DeviceState *dev;
+ size_t i;
+
+ pci_dev = pci_new_multifunction(-1, pcms->south_bridge);
+ object_property_set_bool(OBJECT(pci_dev), "has-usb",
+ machine_usb(machine), &error_abort);
+ object_property_set_bool(OBJECT(pci_dev), "has-acpi",
+ x86_machine_is_acpi_enabled(x86ms),
+ &error_abort);
+ object_property_set_bool(OBJECT(pci_dev), "has-pic", false,
+ &error_abort);
+ object_property_set_bool(OBJECT(pci_dev), "has-pit", false,
+ &error_abort);
+ qdev_prop_set_uint32(DEVICE(pci_dev), "smb_io_base", 0xb100);
+ object_property_set_bool(OBJECT(pci_dev), "smm-enabled",
+ x86_machine_is_smm_enabled(x86ms),
+ &error_abort);
+ dev = DEVICE(pci_dev);
+ for (i = 0; i < ISA_NUM_IRQS; i++) {
+ qdev_connect_gpio_out_named(dev, "isa-irqs", i, x86ms->gsi[i]);
+ }
+ pci_realize_and_unref(pci_dev, pci_bus, &error_fatal);
if (xen_enabled()) {
pci_device_set_intx_routing_notifier(
@@ -280,15 +296,18 @@ static void pc_init1(MachineState *machine,
XEN_IOAPIC_NUM_PIRQS);
}
- piix3 = PIIX3_PCI_DEVICE(pci_dev);
- piix3->pic = x86ms->gsi;
- piix3_devfn = piix3->dev.devfn;
- isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0"));
+ isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci_dev), "isa.0"));
rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev),
"rtc"));
+ piix4_pm = object_resolve_path_component(OBJECT(pci_dev), "pm");
+ dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ide"));
+ pci_ide_create_devs(PCI_DEVICE(dev));
+ idebus[0] = qdev_get_child_bus(dev, "ide.0");
+ idebus[1] = qdev_get_child_bus(dev, "ide.1");
} else {
isa_bus = isa_bus_new(NULL, system_memory, system_io,
&error_abort);
+ isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
rtc_state = isa_new(TYPE_MC146818_RTC);
qdev_prop_set_int32(DEVICE(rtc_state), "base_year", 2000);
@@ -296,8 +315,9 @@ static void pc_init1(MachineState *machine,
i8257_dma_init(isa_bus, 0);
pcms->hpet_enabled = false;
+ idebus[0] = NULL;
+ idebus[1] = NULL;
}
- isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) {
pc_i8259_create(isa_bus, gsi_state->i8259_irq);
@@ -325,12 +345,6 @@ static void pc_init1(MachineState *machine,
pc_nic_init(pcmc, isa_bus, pci_bus);
if (pcmc->pci_enabled) {
- PCIDevice *dev;
-
- dev = pci_create_simple(pci_bus, piix3_devfn + 1, TYPE_PIIX3_IDE);
- pci_ide_create_devs(dev);
- idebus[0] = qdev_get_child_bus(&dev->qdev, "ide.0");
- idebus[1] = qdev_get_child_bus(&dev->qdev, "ide.1");
pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state);
}
#ifdef CONFIG_IDE_ISA
@@ -356,21 +370,9 @@ static void pc_init1(MachineState *machine,
}
#endif
- if (pcmc->pci_enabled && machine_usb(machine)) {
- pci_create_simple(pci_bus, piix3_devfn + 2, TYPE_PIIX3_USB_UHCI);
- }
-
- if (pcmc->pci_enabled && x86_machine_is_acpi_enabled(X86_MACHINE(pcms))) {
- PCIDevice *piix4_pm;
-
+ if (piix4_pm) {
smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0);
- piix4_pm = pci_new(piix3_devfn + 3, TYPE_PIIX4_PM);
- qdev_prop_set_uint32(DEVICE(piix4_pm), "smb_io_base", 0xb100);
- qdev_prop_set_bit(DEVICE(piix4_pm), "smm-enabled",
- x86_machine_is_smm_enabled(x86ms));
- pci_realize_and_unref(piix4_pm, pci_bus, &error_fatal);
- qdev_connect_gpio_out(DEVICE(piix4_pm), 0, x86ms->gsi[9]);
qdev_connect_gpio_out_named(DEVICE(piix4_pm), "smi-irq", 0, smi_irq);
pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(piix4_pm), "i2c"));
/* TODO: Populate SPD eeprom data. */
@@ -382,7 +384,7 @@ static void pc_init1(MachineState *machine,
object_property_allow_set_link,
OBJ_PROP_LINK_STRONG);
object_property_set_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP,
- OBJECT(piix4_pm), &error_abort);
+ piix4_pm, &error_abort);
}
if (machine->nvdimms_state->is_enabled) {
@@ -392,6 +394,56 @@ static void pc_init1(MachineState *machine,
}
}
+typedef enum PCSouthBridgeOption {
+ PC_SOUTH_BRIDGE_OPTION_PIIX3,
+ PC_SOUTH_BRIDGE_OPTION_PIIX4,
+ PC_SOUTH_BRIDGE_OPTION_MAX,
+} PCSouthBridgeOption;
+
+static const QEnumLookup PCSouthBridgeOption_lookup = {
+ .array = (const char *const[]) {
+ [PC_SOUTH_BRIDGE_OPTION_PIIX3] = TYPE_PIIX3_DEVICE,
+ [PC_SOUTH_BRIDGE_OPTION_PIIX4] = TYPE_PIIX4_PCI_DEVICE,
+ },
+ .size = PC_SOUTH_BRIDGE_OPTION_MAX
+};
+
+#define PCSouthBridgeOption_str(val) \
+ qapi_enum_lookup(&PCSouthBridgeOption_lookup, (val))
+
+static int pc_get_south_bridge(Object *obj, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+ int i;
+
+ for (i = 0; i < PCSouthBridgeOption_lookup.size; i++) {
+ if (g_strcmp0(PCSouthBridgeOption_lookup.array[i],
+ pcms->south_bridge) == 0) {
+ return i;
+ }
+ }
+
+ error_setg(errp, "Invalid south bridge value set");
+ return 0;
+}
+
+static void pc_set_south_bridge(Object *obj, int value, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+
+ if (value < 0) {
+ error_setg(errp, "Value can't be negative");
+ return;
+ }
+
+ if (value >= PCSouthBridgeOption_lookup.size) {
+ error_setg(errp, "Value too big");
+ return;
+ }
+
+ pcms->south_bridge = PCSouthBridgeOption_lookup.array[value];
+}
+
/* Looking for a pc_compat_2_4() function? It doesn't exist.
* pc_compat_*() functions that run on machine-init time and
* change global QEMU state are deprecated. Please don't create
@@ -471,6 +523,8 @@ static void pc_xen_hvm_init(MachineState *machine)
static void pc_i440fx_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
+ ObjectClass *oc = OBJECT_CLASS(m);
+ pcmc->default_south_bridge = TYPE_PIIX3_DEVICE;
pcmc->pci_root_uid = 0;
pcmc->default_cpu_version = 1;
@@ -482,6 +536,13 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
+
+ object_class_property_add_enum(oc, "x-south-bridge", "PCSouthBridgeOption",
+ &PCSouthBridgeOption_lookup,
+ pc_get_south_bridge,
+ pc_set_south_bridge);
+ object_class_property_set_description(oc, "x-south-bridge",
+ "Use a different south bridge than PIIX3");
}
static void pc_i440fx_8_2_machine_options(MachineClass *m)
@@ -788,6 +849,7 @@ static void pc_i440fx_2_3_machine_options(MachineClass *m)
{
pc_i440fx_2_4_machine_options(m);
m->hw_version = "2.3.0";
+ m->deprecation_reason = "old and unattended - use a newer version instead";
compat_props_add(m->compat_props, hw_compat_2_3, hw_compat_2_3_len);
compat_props_add(m->compat_props, pc_compat_2_3, pc_compat_2_3_len);
}
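[Editor's note] With the "x-south-bridge" enum class property registered in
pc_i440fx_machine_options(), the i440FX machine can swap in a PIIX4 south
bridge from the command line. A hedged usage example — the accepted values are
the QOM type-name strings in PCSouthBridgeOption_lookup, i.e. "PIIX3" and
(assuming TYPE_PIIX4_PCI_DEVICE still expands to "piix4-isa") "piix4-isa":

    qemu-system-x86_64 -M pc,x-south-bridge=piix4-isa ...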
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index a7386f2ca2..597943ff1b 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -242,11 +242,18 @@ static void pc_q35_init(MachineState *machine)
host_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pcie.0"));
pcms->bus = host_bus;
+ /* irq lines */
+ gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
+
/* create ISA bus */
lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC),
TYPE_ICH9_LPC_DEVICE);
qdev_prop_set_bit(DEVICE(lpc), "smm-enabled",
x86_machine_is_smm_enabled(x86ms));
+ lpc_dev = DEVICE(lpc);
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]);
+ }
pci_realize_and_unref(lpc, host_bus, &error_fatal);
rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc"));
@@ -273,13 +280,6 @@ static void pc_q35_init(MachineState *machine)
"true", true);
}
- /* irq lines */
- gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
-
- lpc_dev = DEVICE(lpc);
- for (i = 0; i < IOAPIC_NUM_PINS; i++) {
- qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]);
- }
isa_bus = ISA_BUS(qdev_get_child_bus(lpc_dev, "isa.0"));
if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) {
diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c
index a9088c910c..e21edf9acd 100644
--- a/hw/input/adb-kbd.c
+++ b/hw/input/adb-kbd.c
@@ -355,7 +355,7 @@ static void adb_kbd_reset(DeviceState *dev)
s->count = 0;
}
-static QemuInputHandler adb_keyboard_handler = {
+static const QemuInputHandler adb_keyboard_handler = {
.name = "QEMU ADB Keyboard",
.mask = INPUT_EVENT_MASK_KEY,
.event = adb_keyboard_event,
diff --git a/hw/input/hid.c b/hw/input/hid.c
index a9c7dd1ce1..b8e85374ca 100644
--- a/hw/input/hid.c
+++ b/hw/input/hid.c
@@ -510,20 +510,20 @@ void hid_free(HIDState *hs)
hid_del_idle_timer(hs);
}
-static QemuInputHandler hid_keyboard_handler = {
+static const QemuInputHandler hid_keyboard_handler = {
.name = "QEMU HID Keyboard",
.mask = INPUT_EVENT_MASK_KEY,
.event = hid_keyboard_event,
};
-static QemuInputHandler hid_mouse_handler = {
+static const QemuInputHandler hid_mouse_handler = {
.name = "QEMU HID Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
.event = hid_pointer_event,
.sync = hid_pointer_sync,
};
-static QemuInputHandler hid_tablet_handler = {
+static const QemuInputHandler hid_tablet_handler = {
.name = "QEMU HID Tablet",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
.event = hid_pointer_event,
diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c
index ea7c07a2ba..6075121b72 100644
--- a/hw/input/lasips2.c
+++ b/hw/input/lasips2.c
@@ -351,6 +351,11 @@ static void lasips2_port_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ /*
+ * The PS/2 mouse port is an integral part of LASI and cannot be
+ * created by users without LASI.
+ */
+ dc->user_creatable = false;
dc->realize = lasips2_port_realize;
}
@@ -397,6 +402,11 @@ static void lasips2_kbd_port_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass);
+ /*
+ * The PS/2 keyboard port is an integral part of LASI and cannot be
+ * created by users without LASI.
+ */
+ dc->user_creatable = false;
device_class_set_parent_realize(dc, lasips2_kbd_port_realize,
&lpdc->parent_realize);
}
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index 45af76a837..c8fd23cf36 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -1231,7 +1231,7 @@ static const VMStateDescription vmstate_ps2_mouse = {
}
};
-static QemuInputHandler ps2_keyboard_handler = {
+static const QemuInputHandler ps2_keyboard_handler = {
.name = "QEMU PS/2 Keyboard",
.mask = INPUT_EVENT_MASK_KEY,
.event = ps2_keyboard_event,
@@ -1242,7 +1242,7 @@ static void ps2_kbd_realize(DeviceState *dev, Error **errp)
qemu_input_handler_register(dev, &ps2_keyboard_handler);
}
-static QemuInputHandler ps2_mouse_handler = {
+static const QemuInputHandler ps2_mouse_handler = {
.name = "QEMU PS/2 Mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
.event = ps2_mouse_event,
diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c
index 7053ad72d4..45e4d4c75d 100644
--- a/hw/input/virtio-input-hid.c
+++ b/hw/input/virtio-input-hid.c
@@ -265,7 +265,7 @@ static const TypeInfo virtio_input_hid_info = {
/* ----------------------------------------------------------------- */
-static QemuInputHandler virtio_keyboard_handler = {
+static const QemuInputHandler virtio_keyboard_handler = {
.name = VIRTIO_ID_NAME_KEYBOARD,
.mask = INPUT_EVENT_MASK_KEY,
.event = virtio_input_handle_event,
@@ -322,7 +322,7 @@ static const TypeInfo virtio_keyboard_info = {
/* ----------------------------------------------------------------- */
-static QemuInputHandler virtio_mouse_handler = {
+static const QemuInputHandler virtio_mouse_handler = {
.name = VIRTIO_ID_NAME_MOUSE,
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
.event = virtio_input_handle_event,
@@ -416,7 +416,7 @@ static const TypeInfo virtio_mouse_info = {
/* ----------------------------------------------------------------- */
-static QemuInputHandler virtio_tablet_handler = {
+static const QemuInputHandler virtio_tablet_handler = {
.name = VIRTIO_ID_NAME_TABLET,
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
.event = virtio_input_handle_event,
@@ -541,7 +541,7 @@ static const TypeInfo virtio_tablet_info = {
/* ----------------------------------------------------------------- */
-static QemuInputHandler virtio_multitouch_handler = {
+static const QemuInputHandler virtio_multitouch_handler = {
.name = VIRTIO_ID_NAME_MULTITOUCH,
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_MTT,
.event = virtio_input_handle_event,
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index 68ad30e2f5..bccb4241c2 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -257,6 +257,7 @@ static const VMStateDescription vmstate_apic_common;
static void apic_common_realize(DeviceState *dev, Error **errp)
{
+ ERRP_GUARD();
APICCommonState *s = APIC_COMMON(dev);
APICCommonClass *info;
static DeviceState *vapic;
@@ -267,6 +268,9 @@ static void apic_common_realize(DeviceState *dev, Error **errp)
info = APIC_COMMON_GET_CLASS(s);
info->realize(dev, errp);
+ if (*errp) {
+ return;
+ }
/* Note: We need at least 1M to map the VAPIC option ROM */
if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK &&
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 1d588946bc..e0d9e512a3 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -516,8 +516,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
if (!kvm_arm_gic_can_save_restore(s)) {
error_setg(&s->migration_blocker, "This operating system kernel does "
"not support vGICv2 migration");
- if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
- error_free(s->migration_blocker);
+ if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
return;
}
}
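[Editor's note] This hunk and the identical ones in arm_gicv3_its_kvm.c,
arm_gicv3_kvm.c and ivshmem.c below are one mechanical API conversion:
migrate_add_blocker() now takes a pointer to the blocker reason (Error **) and
owns it, freeing the reason itself when registration fails, so callers lose
their manual error_free(). A sketch of the new calling convention:

    Error *blocker = NULL;    /* typically a field like s->migration_blocker */

    error_setg(&blocker, "This device does not support migration");
    if (migrate_add_blocker(&blocker, errp) < 0) {
        /* On failure the reason was already freed and blocker is NULL. */
        return;
    }

    /* Teardown: frees the reason and NULLs the pointer, so it is safe
     * to call unconditionally (see the ivshmem_exit() hunk below). */
    migrate_del_blocker(&blocker);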
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
index abaf77057e..fddd6d490c 100644
--- a/hw/intc/arm_gicv3_its_common.c
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -163,8 +163,7 @@ type_init(gicv3_its_common_register_types)
const char *its_class_name(void)
{
if (kvm_irqchip_in_kernel()) {
- /* KVM implementation requires this capability */
- return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
+ return "arm-its-kvm";
} else {
/* Software emulation based model */
return "arm-gicv3-its";
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index 7eda9fb86e..f7df602cff 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -114,8 +114,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
GITS_CTLR)) {
error_setg(&s->migration_blocker, "This operating system kernel "
"does not support vITS migration");
- if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
- error_free(s->migration_blocker);
+ if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
return;
}
} else {
@@ -124,7 +123,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
kvm_msi_use_devid = true;
kvm_gsi_direct_mapping = false;
- kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+ kvm_msi_via_irqfd_allowed = true;
}
/**
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 72ad916d3d..77eb37e131 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -878,8 +878,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
GICD_CTLR)) {
error_setg(&s->migration_blocker, "This operating system kernel does "
"not support vGICv3 migration");
- if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
- error_free(s->migration_blocker);
+ if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
return;
}
}
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 7f701d414b..199c261b07 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -316,7 +316,6 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
return;
}
- sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
/*
* Initialize the END ESB source
@@ -328,7 +327,6 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
return;
}
- sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
/* Set the mapping address of the END ESB pages after the source ESBs */
xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
@@ -347,15 +345,17 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
/* TIMA initialization */
memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
xive, "xive.tima", 4ull << TM_SHIFT);
- sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
/*
* Map all regions. These will be enabled or disabled at reset and
* can also be overridden by KVM memory regions if active
*/
- sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
- sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
- sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
+ memory_region_add_subregion(get_system_memory(), xive->vc_base,
+ &xsrc->esb_mmio);
+ memory_region_add_subregion(get_system_memory(), xive->end_base,
+ &end_xsrc->esb_mmio);
+ memory_region_add_subregion(get_system_memory(), xive->tm_base,
+ &xive->tm_mmio);
}
static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
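[Editor's note] spapr-xive stops exposing its ESB and TIMA regions as numbered
sysbus MMIO resources: the device already knows the guest physical addresses
(vc_base, end_base, tm_base), so it now maps the regions itself. The underlying
idiom, sketched for one region that was previously initialized with
memory_region_init_io():

    /* Place the region at a fixed guest-physical address directly,
     * instead of registering it as sysbus MMIO resource N and relying
     * on the board to call sysbus_mmio_map(). */
    memory_region_add_subregion(get_system_memory(), xive->tm_base,
                                &xive->tm_mmio);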
diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig
index c10cbc5fc1..040a18c070 100644
--- a/hw/isa/Kconfig
+++ b/hw/isa/Kconfig
@@ -31,13 +31,7 @@ config PC87312
select FDC_ISA
select IDE_ISA
-config PIIX3
- bool
- select I8257
- select ISA_BUS
- select MC146818RTC
-
-config PIIX4
+config PIIX
bool
# For historical reasons, SuperIO devices are created in the board
# for PIIX4.
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index 63e0857208..79ffbb52a0 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -67,6 +67,7 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
uint8_t *pci_conf;
ISABus *isabus;
ISADevice *pit;
+ ISADevice *pcspk;
pci_conf = pci->config;
pci_set_word(pci_conf + PCI_COMMAND,
@@ -102,7 +103,9 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
pit = i8254_pit_init(isabus, 0x40, 0, NULL);
/* speaker */
- pcspk_init(isa_new(TYPE_PC_SPEAKER), isabus, pit);
+ pcspk = isa_new(TYPE_PC_SPEAKER);
+ object_property_set_link(OBJECT(pcspk), "pit", OBJECT(pit), &error_fatal);
+ isa_realize_and_unref(pcspk, isabus, &error_fatal);
/* 2 82C37 (dma) */
isa_create_simple(isabus, "i82374");
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index a289eccfb1..f1e0f14007 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -52,18 +52,25 @@ static const TypeInfo isa_bus_info = {
ISABus *isa_bus_new(DeviceState *dev, MemoryRegion* address_space,
MemoryRegion *address_space_io, Error **errp)
{
+ DeviceState *bridge = NULL;
+
if (isabus) {
error_setg(errp, "Can't create a second ISA bus");
return NULL;
}
if (!dev) {
- dev = qdev_new("isabus-bridge");
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ bridge = qdev_new("isabus-bridge");
+ dev = bridge;
}
isabus = ISA_BUS(qbus_new(TYPE_ISA_BUS, dev, NULL));
isabus->address_space = address_space;
isabus->address_space_io = address_space_io;
+
+ if (bridge) {
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(bridge), &error_fatal);
+ }
+
return isabus;
}
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index 3f59980aa0..23eba64f22 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -675,6 +675,9 @@ static void ich9_lpc_initfn(Object *obj)
object_initialize_child(obj, "rtc", &lpc->rtc, TYPE_MC146818_RTC);
+ qdev_init_gpio_out_named(DEVICE(lpc), lpc->gsi, ICH9_GPIO_GSI,
+ IOAPIC_NUM_PINS);
+
object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT,
&lpc->sci_gsi, OBJ_PROP_FLAG_READ);
object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD,
@@ -691,9 +694,9 @@ static void ich9_lpc_initfn(Object *obj)
static void ich9_lpc_realize(PCIDevice *d, Error **errp)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
- DeviceState *dev = DEVICE(d);
PCIBus *pci_bus = pci_get_bus(d);
ISABus *isa_bus;
+ uint32_t irq;
if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) &&
!(lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT))) {
@@ -734,8 +737,6 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
ICH9_RST_CNT_IOPORT, &lpc->rst_cnt_mem,
1);
- qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, IOAPIC_NUM_PINS);
-
isa_bus_register_input_irqs(isa_bus, lpc->gsi);
i8257_dma_init(isa_bus, 0);
@@ -745,6 +746,8 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
if (!qdev_realize(DEVICE(&lpc->rtc), BUS(isa_bus), errp)) {
return;
}
+ irq = object_property_get_uint(OBJECT(&lpc->rtc), "irq", &error_fatal);
+ isa_connect_gpio_out(ISA_DEVICE(&lpc->rtc), 0, irq);
pci_bus_irqs(pci_bus, ich9_lpc_set_irq, d, ICH9_LPC_NB_PIRQS);
pci_bus_map_irqs(pci_bus, ich9_lpc_map_irq);
diff --git a/hw/isa/meson.build b/hw/isa/meson.build
index b855e81276..2ab99ce0c6 100644
--- a/hw/isa/meson.build
+++ b/hw/isa/meson.build
@@ -3,8 +3,7 @@ system_ss.add(when: 'CONFIG_I82378', if_true: files('i82378.c'))
system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('isa-bus.c'))
system_ss.add(when: 'CONFIG_ISA_SUPERIO', if_true: files('isa-superio.c'))
system_ss.add(when: 'CONFIG_PC87312', if_true: files('pc87312.c'))
-system_ss.add(when: 'CONFIG_PIIX3', if_true: files('piix3.c'))
-system_ss.add(when: 'CONFIG_PIIX4', if_true: files('piix4.c'))
+system_ss.add(when: 'CONFIG_PIIX', if_true: files('piix.c'))
system_ss.add(when: 'CONFIG_SMC37C669', if_true: files('smc37c669-superio.c'))
system_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686.c'))
diff --git a/hw/isa/piix3.c b/hw/isa/piix.c
index 117024e450..04ebed5b52 100644
--- a/hw/isa/piix3.c
+++ b/hw/isa/piix.c
@@ -2,6 +2,7 @@
* QEMU PIIX PCI ISA Bridge Emulation
*
* Copyright (c) 2006 Fabrice Bellard
+ * Copyright (c) 2018 Hervé Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -27,63 +28,72 @@
#include "qapi/error.h"
#include "hw/dma/i8257.h"
#include "hw/southbridge/piix.h"
+#include "hw/timer/i8254.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
+#include "hw/ide/piix.h"
+#include "hw/intc/i8259.h"
#include "hw/isa/isa.h"
#include "sysemu/runstate.h"
#include "migration/vmstate.h"
#include "hw/acpi/acpi_aml_interface.h"
-static void piix3_set_irq_pic(PIIX3State *piix3, int pic_irq)
+static void piix_set_irq_pic(PIIXState *s, int pic_irq)
{
- qemu_set_irq(piix3->pic[pic_irq],
- !!(piix3->pic_levels &
+ qemu_set_irq(s->isa_irqs_in[pic_irq],
+ !!(s->pic_levels &
(((1ULL << PIIX_NUM_PIRQS) - 1) <<
(pic_irq * PIIX_NUM_PIRQS))));
}
-static void piix3_set_irq_level_internal(PIIX3State *piix3, int pirq, int level)
+static void piix_set_pci_irq_level_internal(PIIXState *s, int pirq, int level)
{
int pic_irq;
uint64_t mask;
- pic_irq = piix3->dev.config[PIIX_PIRQCA + pirq];
- if (pic_irq >= PIIX_NUM_PIC_IRQS) {
+ pic_irq = s->dev.config[PIIX_PIRQCA + pirq];
+ if (pic_irq >= ISA_NUM_IRQS) {
return;
}
mask = 1ULL << ((pic_irq * PIIX_NUM_PIRQS) + pirq);
- piix3->pic_levels &= ~mask;
- piix3->pic_levels |= mask * !!level;
+ s->pic_levels &= ~mask;
+ s->pic_levels |= mask * !!level;
}
-static void piix3_set_irq_level(PIIX3State *piix3, int pirq, int level)
+static void piix_set_pci_irq_level(PIIXState *s, int pirq, int level)
{
int pic_irq;
- pic_irq = piix3->dev.config[PIIX_PIRQCA + pirq];
- if (pic_irq >= PIIX_NUM_PIC_IRQS) {
+ pic_irq = s->dev.config[PIIX_PIRQCA + pirq];
+ if (pic_irq >= ISA_NUM_IRQS) {
return;
}
- piix3_set_irq_level_internal(piix3, pirq, level);
+ piix_set_pci_irq_level_internal(s, pirq, level);
- piix3_set_irq_pic(piix3, pic_irq);
+ piix_set_irq_pic(s, pic_irq);
}
-static void piix3_set_irq(void *opaque, int pirq, int level)
+static void piix_set_pci_irq(void *opaque, int pirq, int level)
{
- PIIX3State *piix3 = opaque;
- piix3_set_irq_level(piix3, pirq, level);
+ PIIXState *s = opaque;
+ piix_set_pci_irq_level(s, pirq, level);
}
-static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin)
+static void piix_request_i8259_irq(void *opaque, int irq, int level)
{
- PIIX3State *piix3 = opaque;
- int irq = piix3->dev.config[PIIX_PIRQCA + pin];
+ PIIXState *s = opaque;
+ qemu_set_irq(s->cpu_intr, level);
+}
+
+static PCIINTxRoute piix_route_intx_pin_to_irq(void *opaque, int pin)
+{
+ PCIDevice *pci_dev = opaque;
+ int irq = pci_dev->config[PIIX_PIRQCA + pin];
PCIINTxRoute route;
- if (irq < PIIX_NUM_PIC_IRQS) {
+ if (irq < ISA_NUM_IRQS) {
route.mode = PCI_INTX_ENABLED;
route.irq = irq;
} else {
@@ -94,36 +104,36 @@ static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin)
}
/* irq routing is changed. so rebuild bitmap */
-static void piix3_update_irq_levels(PIIX3State *piix3)
+static void piix_update_pci_irq_levels(PIIXState *s)
{
- PCIBus *bus = pci_get_bus(&piix3->dev);
+ PCIBus *bus = pci_get_bus(&s->dev);
int pirq;
- piix3->pic_levels = 0;
+ s->pic_levels = 0;
for (pirq = 0; pirq < PIIX_NUM_PIRQS; pirq++) {
- piix3_set_irq_level(piix3, pirq, pci_bus_get_irq_level(bus, pirq));
+ piix_set_pci_irq_level(s, pirq, pci_bus_get_irq_level(bus, pirq));
}
}
-static void piix3_write_config(PCIDevice *dev,
- uint32_t address, uint32_t val, int len)
+static void piix_write_config(PCIDevice *dev, uint32_t address, uint32_t val,
+ int len)
{
pci_default_write_config(dev, address, val, len);
if (ranges_overlap(address, len, PIIX_PIRQCA, 4)) {
- PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev);
+ PIIXState *s = PIIX_PCI_DEVICE(dev);
int pic_irq;
- pci_bus_fire_intx_routing_notifier(pci_get_bus(&piix3->dev));
- piix3_update_irq_levels(piix3);
- for (pic_irq = 0; pic_irq < PIIX_NUM_PIC_IRQS; pic_irq++) {
- piix3_set_irq_pic(piix3, pic_irq);
+ pci_bus_fire_intx_routing_notifier(pci_get_bus(&s->dev));
+ piix_update_pci_irq_levels(s);
+ for (pic_irq = 0; pic_irq < ISA_NUM_IRQS; pic_irq++) {
+ piix_set_irq_pic(s, pic_irq);
}
}
}
-static void piix3_reset(DeviceState *dev)
+static void piix_reset(DeviceState *dev)
{
- PIIX3State *d = PIIX3_PCI_DEVICE(dev);
+ PIIXState *d = PIIX_PCI_DEVICE(dev);
uint8_t *pci_conf = d->dev.config;
pci_conf[0x04] = 0x07; /* master, memory and I/O */
@@ -162,9 +172,9 @@ static void piix3_reset(DeviceState *dev)
d->rcr = 0;
}
-static int piix3_post_load(void *opaque, int version_id)
+static int piix_post_load(void *opaque, int version_id)
{
- PIIX3State *piix3 = opaque;
+ PIIXState *s = opaque;
int pirq;
/*
@@ -176,18 +186,29 @@ static int piix3_post_load(void *opaque, int version_id)
* Here, we update irq levels without raising the interrupt.
* Interrupt state will be deserialized separately through the i8259.
*/
- piix3->pic_levels = 0;
+ s->pic_levels = 0;
for (pirq = 0; pirq < PIIX_NUM_PIRQS; pirq++) {
- piix3_set_irq_level_internal(piix3, pirq,
- pci_bus_get_irq_level(pci_get_bus(&piix3->dev), pirq));
+ piix_set_pci_irq_level_internal(s, pirq,
+ pci_bus_get_irq_level(pci_get_bus(&s->dev), pirq));
}
return 0;
}
+static int piix4_post_load(void *opaque, int version_id)
+{
+ PIIXState *s = opaque;
+
+ if (version_id == 2) {
+ s->rcr = 0;
+ }
+
+ return piix_post_load(opaque, version_id);
+}
+
static int piix3_pre_save(void *opaque)
{
int i;
- PIIX3State *piix3 = opaque;
+ PIIXState *piix3 = opaque;
for (i = 0; i < ARRAY_SIZE(piix3->pci_irq_levels_vmstate); i++) {
piix3->pci_irq_levels_vmstate[i] =
@@ -199,7 +220,7 @@ static int piix3_pre_save(void *opaque)
static bool piix3_rcr_needed(void *opaque)
{
- PIIX3State *piix3 = opaque;
+ PIIXState *piix3 = opaque;
return (piix3->rcr != 0);
}
@@ -210,7 +231,7 @@ static const VMStateDescription vmstate_piix3_rcr = {
.minimum_version_id = 1,
.needed = piix3_rcr_needed,
.fields = (VMStateField[]) {
- VMSTATE_UINT8(rcr, PIIX3State),
+ VMSTATE_UINT8(rcr, PIIXState),
VMSTATE_END_OF_LIST()
}
};
@@ -219,11 +240,11 @@ static const VMStateDescription vmstate_piix3 = {
.name = "PIIX3",
.version_id = 3,
.minimum_version_id = 2,
- .post_load = piix3_post_load,
+ .post_load = piix_post_load,
.pre_save = piix3_pre_save,
.fields = (VMStateField[]) {
- VMSTATE_PCI_DEVICE(dev, PIIX3State),
- VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIX3State,
+ VMSTATE_PCI_DEVICE(dev, PIIXState),
+ VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIXState,
PIIX_NUM_PIRQS, 3),
VMSTATE_END_OF_LIST()
},
@@ -233,10 +254,21 @@ static const VMStateDescription vmstate_piix3 = {
}
};
+static const VMStateDescription vmstate_piix4 = {
+ .name = "PIIX4",
+ .version_id = 3,
+ .minimum_version_id = 2,
+ .post_load = piix4_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, PIIXState),
+ VMSTATE_UINT8_V(rcr, PIIXState, 3),
+ VMSTATE_END_OF_LIST()
+ }
+};
static void rcr_write(void *opaque, hwaddr addr, uint64_t val, unsigned len)
{
- PIIX3State *d = opaque;
+ PIIXState *d = opaque;
if (val & 4) {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
@@ -247,7 +279,7 @@ static void rcr_write(void *opaque, hwaddr addr, uint64_t val, unsigned len)
static uint64_t rcr_read(void *opaque, hwaddr addr, unsigned len)
{
- PIIX3State *d = opaque;
+ PIIXState *d = opaque;
return d->rcr;
}
@@ -262,10 +294,13 @@ static const MemoryRegionOps rcr_ops = {
},
};
-static void pci_piix3_realize(PCIDevice *dev, Error **errp)
+static void pci_piix_realize(PCIDevice *dev, const char *uhci_type,
+ Error **errp)
{
- PIIX3State *d = PIIX3_PCI_DEVICE(dev);
+ PIIXState *d = PIIX_PCI_DEVICE(dev);
+ PCIBus *pci_bus = pci_get_bus(dev);
ISABus *isa_bus;
+ uint32_t irq;
isa_bus = isa_bus_new(DEVICE(d), pci_address_space(dev),
pci_address_space_io(dev), errp);
@@ -274,10 +309,33 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp)
}
memory_region_init_io(&d->rcr_mem, OBJECT(dev), &rcr_ops, d,
- "piix3-reset-control", 1);
+ "piix-reset-control", 1);
memory_region_add_subregion_overlap(pci_address_space_io(dev),
PIIX_RCR_IOPORT, &d->rcr_mem, 1);
+ /* PIC */
+ if (d->has_pic) {
+ qemu_irq *i8259_out_irq = qemu_allocate_irqs(piix_request_i8259_irq, d,
+ 1);
+ qemu_irq *i8259 = i8259_init(isa_bus, *i8259_out_irq);
+ size_t i;
+
+ for (i = 0; i < ISA_NUM_IRQS; i++) {
+ d->isa_irqs_in[i] = i8259[i];
+ }
+
+ g_free(i8259);
+
+ qdev_init_gpio_out_named(DEVICE(dev), &d->cpu_intr, "intr", 1);
+ }
+
+ isa_bus_register_input_irqs(isa_bus, d->isa_irqs_in);
+
+ /* PIT */
+ if (d->has_pit) {
+ i8254_pit_init(isa_bus, 0x40, 0, NULL);
+ }
+
i8257_dma_init(isa_bus, 0);
/* RTC */
@@ -285,6 +343,38 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp)
if (!qdev_realize(DEVICE(&d->rtc), BUS(isa_bus), errp)) {
return;
}
+ irq = object_property_get_uint(OBJECT(&d->rtc), "irq", &error_fatal);
+ isa_connect_gpio_out(ISA_DEVICE(&d->rtc), 0, irq);
+
+ /* IDE */
+ qdev_prop_set_int32(DEVICE(&d->ide), "addr", dev->devfn + 1);
+ if (!qdev_realize(DEVICE(&d->ide), BUS(pci_bus), errp)) {
+ return;
+ }
+
+ /* USB */
+ if (d->has_usb) {
+ object_initialize_child(OBJECT(dev), "uhci", &d->uhci, uhci_type);
+ qdev_prop_set_int32(DEVICE(&d->uhci), "addr", dev->devfn + 2);
+ if (!qdev_realize(DEVICE(&d->uhci), BUS(pci_bus), errp)) {
+ return;
+ }
+ }
+
+ /* Power Management */
+ if (d->has_acpi) {
+ object_initialize_child(OBJECT(d), "pm", &d->pm, TYPE_PIIX4_PM);
+ qdev_prop_set_int32(DEVICE(&d->pm), "addr", dev->devfn + 3);
+ qdev_prop_set_uint32(DEVICE(&d->pm), "smb_io_base", d->smb_io_base);
+ qdev_prop_set_bit(DEVICE(&d->pm), "smm-enabled", d->smm_enabled);
+ if (!qdev_realize(DEVICE(&d->pm), BUS(pci_bus), errp)) {
+ return;
+ }
+ qdev_connect_gpio_out(DEVICE(&d->pm), 0, d->isa_irqs_in[9]);
+ }
+
+ pci_bus_irqs(pci_bus, piix_set_pci_irq, d, PIIX_NUM_PIRQS);
+ pci_bus_set_route_irq_fn(pci_bus, piix_route_intx_pin_to_irq);
}
static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
@@ -308,43 +398,54 @@ static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
qbus_build_aml(bus, scope);
}
-static void pci_piix3_init(Object *obj)
+static void pci_piix_init(Object *obj)
{
- PIIX3State *d = PIIX3_PCI_DEVICE(obj);
+ PIIXState *d = PIIX_PCI_DEVICE(obj);
+
+ qdev_init_gpio_out_named(DEVICE(obj), d->isa_irqs_in, "isa-irqs",
+ ISA_NUM_IRQS);
object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC);
}
-static void pci_piix3_class_init(ObjectClass *klass, void *data)
+static Property pci_piix_props[] = {
+ DEFINE_PROP_UINT32("smb_io_base", PIIXState, smb_io_base, 0),
+ DEFINE_PROP_BOOL("has-acpi", PIIXState, has_acpi, true),
+ DEFINE_PROP_BOOL("has-pic", PIIXState, has_pic, true),
+ DEFINE_PROP_BOOL("has-pit", PIIXState, has_pit, true),
+ DEFINE_PROP_BOOL("has-usb", PIIXState, has_usb, true),
+ DEFINE_PROP_BOOL("smm-enabled", PIIXState, smm_enabled, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pci_piix_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
- k->config_write = piix3_write_config;
- dc->reset = piix3_reset;
+ k->config_write = piix_write_config;
+ dc->reset = piix_reset;
dc->desc = "ISA bridge";
- dc->vmsd = &vmstate_piix3;
dc->hotpluggable = false;
k->vendor_id = PCI_VENDOR_ID_INTEL;
- /* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */
- k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0;
k->class_id = PCI_CLASS_BRIDGE_ISA;
/*
- * Reason: part of PIIX3 southbridge, needs to be wired up by
+ * Reason: part of PIIX southbridge, needs to be wired up by e.g.
* pc_piix.c's pc_init1()
*/
dc->user_creatable = false;
+ device_class_set_props(dc, pci_piix_props);
adevc->build_dev_aml = build_pci_isa_aml;
}
-static const TypeInfo piix3_pci_type_info = {
- .name = TYPE_PIIX3_PCI_DEVICE,
+static const TypeInfo piix_pci_type_info = {
+ .name = TYPE_PIIX_PCI_DEVICE,
.parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PIIX3State),
- .instance_init = pci_piix3_init,
+ .instance_size = sizeof(PIIXState),
+ .instance_init = pci_piix_init,
.abstract = true,
- .class_init = pci_piix3_class_init,
+ .class_init = pci_piix_class_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ TYPE_ACPI_DEV_AML_IF },
@@ -354,36 +455,68 @@ static const TypeInfo piix3_pci_type_info = {
static void piix3_realize(PCIDevice *dev, Error **errp)
{
- ERRP_GUARD();
- PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev);
- PCIBus *pci_bus = pci_get_bus(dev);
+ pci_piix_realize(dev, TYPE_PIIX3_USB_UHCI, errp);
+}
- pci_piix3_realize(dev, errp);
- if (*errp) {
- return;
- }
+static void piix3_init(Object *obj)
+{
+ PIIXState *d = PIIX_PCI_DEVICE(obj);
- pci_bus_irqs(pci_bus, piix3_set_irq, piix3, PIIX_NUM_PIRQS);
- pci_bus_set_route_irq_fn(pci_bus, piix3_route_intx_pin_to_irq);
+ object_initialize_child(obj, "ide", &d->ide, TYPE_PIIX3_IDE);
}
static void piix3_class_init(ObjectClass *klass, void *data)
{
+ DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = piix3_realize;
+ /* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */
+ k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0;
+ dc->vmsd = &vmstate_piix3;
}
static const TypeInfo piix3_info = {
.name = TYPE_PIIX3_DEVICE,
- .parent = TYPE_PIIX3_PCI_DEVICE,
+ .parent = TYPE_PIIX_PCI_DEVICE,
+ .instance_init = piix3_init,
.class_init = piix3_class_init,
};
+static void piix4_realize(PCIDevice *dev, Error **errp)
+{
+ pci_piix_realize(dev, TYPE_PIIX4_USB_UHCI, errp);
+}
+
+static void piix4_init(Object *obj)
+{
+ PIIXState *s = PIIX_PCI_DEVICE(obj);
+
+ object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE);
+}
+
+static void piix4_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = piix4_realize;
+ k->device_id = PCI_DEVICE_ID_INTEL_82371AB_0;
+ dc->vmsd = &vmstate_piix4;
+}
+
+static const TypeInfo piix4_info = {
+ .name = TYPE_PIIX4_PCI_DEVICE,
+ .parent = TYPE_PIIX_PCI_DEVICE,
+ .instance_init = piix4_init,
+ .class_init = piix4_class_init,
+};
+
static void piix3_register_types(void)
{
- type_register_static(&piix3_pci_type_info);
+ type_register_static(&piix_pci_type_info);
type_register_static(&piix3_info);
+ type_register_static(&piix4_info);
}
type_init(piix3_register_types)
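[Editor's note] The net effect of the piix.c rework (and of the piix4.c
deletion that follows) is a classic QOM refactoring: one abstract
TYPE_PIIX_PCI_DEVICE base type carries the shared PIIXState, properties and
realize logic, while PIIX3 and PIIX4 become thin subclasses that differ only in
IDE/UHCI flavor, PCI device ID and vmstate. A minimal sketch of the pattern:

    static const TypeInfo piix_pci_type_info = {
        .name          = TYPE_PIIX_PCI_DEVICE,
        .parent        = TYPE_PCI_DEVICE,
        .instance_size = sizeof(PIIXState),
        .abstract      = true,                /* never instantiated directly */
        .class_init    = pci_piix_class_init, /* shared behavior */
    };

    static const TypeInfo piix3_info = {
        .name       = TYPE_PIIX3_DEVICE,
        .parent     = TYPE_PIIX_PCI_DEVICE,   /* inherits state and props */
        .class_init = piix3_class_init,       /* sets device ID and vmsd only */
    };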
diff --git a/hw/isa/piix4.c b/hw/isa/piix4.c
deleted file mode 100644
index e0b149f8eb..0000000000
--- a/hw/isa/piix4.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * QEMU PIIX4 PCI Bridge Emulation
- *
- * Copyright (c) 2006 Fabrice Bellard
- * Copyright (c) 2018 Hervé Poussineau
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "hw/irq.h"
-#include "hw/southbridge/piix.h"
-#include "hw/pci/pci.h"
-#include "hw/ide/piix.h"
-#include "hw/isa/isa.h"
-#include "hw/intc/i8259.h"
-#include "hw/dma/i8257.h"
-#include "hw/timer/i8254.h"
-#include "hw/rtc/mc146818rtc.h"
-#include "hw/ide/pci.h"
-#include "hw/acpi/piix4.h"
-#include "hw/usb/hcd-uhci.h"
-#include "migration/vmstate.h"
-#include "sysemu/reset.h"
-#include "sysemu/runstate.h"
-#include "qom/object.h"
-
-struct PIIX4State {
- PCIDevice dev;
- qemu_irq cpu_intr;
- qemu_irq *isa;
-
- MC146818RtcState rtc;
- PCIIDEState ide;
- UHCIState uhci;
- PIIX4PMState pm;
- /* Reset Control Register */
- MemoryRegion rcr_mem;
- uint8_t rcr;
-};
-
-OBJECT_DECLARE_SIMPLE_TYPE(PIIX4State, PIIX4_PCI_DEVICE)
-
-static void piix4_set_irq(void *opaque, int irq_num, int level)
-{
- int i, pic_irq, pic_level;
- PIIX4State *s = opaque;
- PCIBus *bus = pci_get_bus(&s->dev);
-
- /* now we change the pic irq level according to the piix irq mappings */
- /* XXX: optimize */
- pic_irq = s->dev.config[PIIX_PIRQCA + irq_num];
- if (pic_irq < ISA_NUM_IRQS) {
- /* The pic level is the logical OR of all the PCI irqs mapped to it. */
- pic_level = 0;
- for (i = 0; i < PIIX_NUM_PIRQS; i++) {
- if (pic_irq == s->dev.config[PIIX_PIRQCA + i]) {
- pic_level |= pci_bus_get_irq_level(bus, i);
- }
- }
- qemu_set_irq(s->isa[pic_irq], pic_level);
- }
-}
-
-static void piix4_isa_reset(DeviceState *dev)
-{
- PIIX4State *d = PIIX4_PCI_DEVICE(dev);
- uint8_t *pci_conf = d->dev.config;
-
- pci_conf[0x04] = 0x07; // master, memory and I/O
- pci_conf[0x05] = 0x00;
- pci_conf[0x06] = 0x00;
- pci_conf[0x07] = 0x02; // PCI_status_devsel_medium
- pci_conf[0x4c] = 0x4d;
- pci_conf[0x4e] = 0x03;
- pci_conf[0x4f] = 0x00;
- pci_conf[0x60] = 0x80;
- pci_conf[0x61] = 0x80;
- pci_conf[0x62] = 0x80;
- pci_conf[0x63] = 0x80;
- pci_conf[0x69] = 0x02;
- pci_conf[0x70] = 0x80;
- pci_conf[0x76] = 0x0c;
- pci_conf[0x77] = 0x0c;
- pci_conf[0x78] = 0x02;
- pci_conf[0x79] = 0x00;
- pci_conf[0x80] = 0x00;
- pci_conf[0x82] = 0x00;
- pci_conf[0xa0] = 0x08;
- pci_conf[0xa2] = 0x00;
- pci_conf[0xa3] = 0x00;
- pci_conf[0xa4] = 0x00;
- pci_conf[0xa5] = 0x00;
- pci_conf[0xa6] = 0x00;
- pci_conf[0xa7] = 0x00;
- pci_conf[0xa8] = 0x0f;
- pci_conf[0xaa] = 0x00;
- pci_conf[0xab] = 0x00;
- pci_conf[0xac] = 0x00;
- pci_conf[0xae] = 0x00;
-
- d->rcr = 0;
-}
-
-static int piix4_post_load(void *opaque, int version_id)
-{
- PIIX4State *s = opaque;
-
- if (version_id == 2) {
- s->rcr = 0;
- }
-
- return 0;
-}
-
-static const VMStateDescription vmstate_piix4 = {
- .name = "PIIX4",
- .version_id = 3,
- .minimum_version_id = 2,
- .post_load = piix4_post_load,
- .fields = (VMStateField[]) {
- VMSTATE_PCI_DEVICE(dev, PIIX4State),
- VMSTATE_UINT8_V(rcr, PIIX4State, 3),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void piix4_request_i8259_irq(void *opaque, int irq, int level)
-{
- PIIX4State *s = opaque;
- qemu_set_irq(s->cpu_intr, level);
-}
-
-static void piix4_set_i8259_irq(void *opaque, int irq, int level)
-{
- PIIX4State *s = opaque;
- qemu_set_irq(s->isa[irq], level);
-}
-
-static void piix4_rcr_write(void *opaque, hwaddr addr, uint64_t val,
- unsigned int len)
-{
- PIIX4State *s = opaque;
-
- if (val & 4) {
- qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
- return;
- }
-
- s->rcr = val & 2; /* keep System Reset type only */
-}
-
-static uint64_t piix4_rcr_read(void *opaque, hwaddr addr, unsigned int len)
-{
- PIIX4State *s = opaque;
-
- return s->rcr;
-}
-
-static const MemoryRegionOps piix4_rcr_ops = {
- .read = piix4_rcr_read,
- .write = piix4_rcr_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .impl = {
- .min_access_size = 1,
- .max_access_size = 1,
- },
-};
-
-static void piix4_realize(PCIDevice *dev, Error **errp)
-{
- PIIX4State *s = PIIX4_PCI_DEVICE(dev);
- PCIBus *pci_bus = pci_get_bus(dev);
- ISABus *isa_bus;
- qemu_irq *i8259_out_irq;
-
- isa_bus = isa_bus_new(DEVICE(dev), pci_address_space(dev),
- pci_address_space_io(dev), errp);
- if (!isa_bus) {
- return;
- }
-
- qdev_init_gpio_in_named(DEVICE(dev), piix4_set_i8259_irq,
- "isa", ISA_NUM_IRQS);
- qdev_init_gpio_out_named(DEVICE(dev), &s->cpu_intr,
- "intr", 1);
-
- memory_region_init_io(&s->rcr_mem, OBJECT(dev), &piix4_rcr_ops, s,
- "reset-control", 1);
- memory_region_add_subregion_overlap(pci_address_space_io(dev),
- PIIX_RCR_IOPORT, &s->rcr_mem, 1);
-
- /* initialize i8259 pic */
- i8259_out_irq = qemu_allocate_irqs(piix4_request_i8259_irq, s, 1);
- s->isa = i8259_init(isa_bus, *i8259_out_irq);
-
- /* initialize ISA irqs */
- isa_bus_register_input_irqs(isa_bus, s->isa);
-
- /* initialize pit */
- i8254_pit_init(isa_bus, 0x40, 0, NULL);
-
- /* DMA */
- i8257_dma_init(isa_bus, 0);
-
- /* RTC */
- qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000);
- if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) {
- return;
- }
- s->rtc.irq = isa_get_irq(ISA_DEVICE(&s->rtc), s->rtc.isairq);
-
- /* IDE */
- qdev_prop_set_int32(DEVICE(&s->ide), "addr", dev->devfn + 1);
- if (!qdev_realize(DEVICE(&s->ide), BUS(pci_bus), errp)) {
- return;
- }
-
- /* USB */
- qdev_prop_set_int32(DEVICE(&s->uhci), "addr", dev->devfn + 2);
- if (!qdev_realize(DEVICE(&s->uhci), BUS(pci_bus), errp)) {
- return;
- }
-
- /* ACPI controller */
- qdev_prop_set_int32(DEVICE(&s->pm), "addr", dev->devfn + 3);
- if (!qdev_realize(DEVICE(&s->pm), BUS(pci_bus), errp)) {
- return;
- }
- qdev_connect_gpio_out(DEVICE(&s->pm), 0, s->isa[9]);
-
- pci_bus_irqs(pci_bus, piix4_set_irq, s, PIIX_NUM_PIRQS);
-}
-
-static void piix4_init(Object *obj)
-{
- PIIX4State *s = PIIX4_PCI_DEVICE(obj);
-
- object_initialize_child(obj, "rtc", &s->rtc, TYPE_MC146818_RTC);
- object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE);
- object_initialize_child(obj, "uhci", &s->uhci, TYPE_PIIX4_USB_UHCI);
-
- object_initialize_child(obj, "pm", &s->pm, TYPE_PIIX4_PM);
- qdev_prop_set_uint32(DEVICE(&s->pm), "smb_io_base", 0x1100);
- qdev_prop_set_bit(DEVICE(&s->pm), "smm-enabled", 0);
-}
-
-static void piix4_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
- PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
-
- k->realize = piix4_realize;
- k->vendor_id = PCI_VENDOR_ID_INTEL;
- k->device_id = PCI_DEVICE_ID_INTEL_82371AB_0;
- k->class_id = PCI_CLASS_BRIDGE_ISA;
- dc->reset = piix4_isa_reset;
- dc->desc = "ISA bridge";
- dc->vmsd = &vmstate_piix4;
- /*
- * Reason: part of PIIX4 southbridge, needs to be wired up,
- * e.g. by mips_malta_init()
- */
- dc->user_creatable = false;
- dc->hotpluggable = false;
-}
-
-static const TypeInfo piix4_info = {
- .name = TYPE_PIIX4_PCI_DEVICE,
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PIIX4State),
- .instance_init = piix4_init,
- .class_init = piix4_class_init,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
-static void piix4_register_types(void)
-{
- type_register_static(&piix4_info);
-}
-
-type_init(piix4_register_types)
diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c
index 2952fe452e..4b7dc67a2d 100644
--- a/hw/loongarch/virt.c
+++ b/hw/loongarch/virt.c
@@ -412,6 +412,7 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic, LoongArchMachineState
}
dev = qdev_new(TYPE_ACPI_GED);
qdev_prop_set_uint32(dev, "ged-event", event);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
/* ged event */
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, VIRT_GED_EVT_ADDR);
@@ -422,7 +423,6 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic, LoongArchMachineState
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
return dev;
}
diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig
index da3a37e215..ac1eb06a51 100644
--- a/hw/mips/Kconfig
+++ b/hw/mips/Kconfig
@@ -2,7 +2,7 @@ config MALTA
bool
select GT64120
select ISA_SUPERIO
- select PIIX4
+ select PIIX
config MIPSSIM
bool
diff --git a/hw/mips/cps.c b/hw/mips/cps.c
index 2b5269ebf1..b6612c1762 100644
--- a/hw/mips/cps.c
+++ b/hw/mips/cps.c
@@ -24,7 +24,6 @@
#include "hw/mips/mips.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
-#include "hw/mips/cpudevs.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c
index c6109633fe..97b2c8ed8e 100644
--- a/hw/mips/fuloong2e.c
+++ b/hw/mips/fuloong2e.c
@@ -30,7 +30,6 @@
#include "hw/block/flash.h"
#include "hw/mips/mips.h"
#include "hw/mips/bootloader.h"
-#include "hw/mips/cpudevs.h"
#include "hw/pci/pci.h"
#include "hw/loader.h"
#include "hw/ide/pci.h"
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
index c32d2b0b0a..d33a76ad4d 100644
--- a/hw/mips/jazz.c
+++ b/hw/mips/jazz.c
@@ -26,7 +26,6 @@
#include "qemu/datadir.h"
#include "hw/clock.h"
#include "hw/mips/mips.h"
-#include "hw/mips/cpudevs.h"
#include "hw/intc/i8259.h"
#include "hw/dma/i8257.h"
#include "hw/char/serial.h"
@@ -177,6 +176,7 @@ static void mips_jazz_init(MachineState *machine,
SysBusDevice *sysbus;
ISABus *isa_bus;
ISADevice *pit;
+ ISADevice *pcspk;
DriveInfo *fds[MAX_FD];
MemoryRegion *bios = g_new(MemoryRegion, 1);
MemoryRegion *bios2 = g_new(MemoryRegion, 1);
@@ -279,7 +279,9 @@ static void mips_jazz_init(MachineState *machine,
isa_bus_register_input_irqs(isa_bus, i8259);
i8257_dma_init(isa_bus, 0);
pit = i8254_pit_init(isa_bus, 0x40, 0, NULL);
- pcspk_init(isa_new(TYPE_PC_SPEAKER), isa_bus, pit);
+ pcspk = isa_new(TYPE_PC_SPEAKER);
+ object_property_set_link(OBJECT(pcspk), "pit", OBJECT(pit), &error_fatal);
+ isa_realize_and_unref(pcspk, isa_bus, &error_fatal);
/* Video card */
switch (jazz_model) {
diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c
index b74b358874..33eae01eca 100644
--- a/hw/mips/loongson3_virt.c
+++ b/hw/mips/loongson3_virt.c
@@ -32,7 +32,6 @@
#include "hw/char/serial.h"
#include "hw/intc/loongson_liointc.h"
#include "hw/mips/mips.h"
-#include "hw/mips/cpudevs.h"
#include "hw/mips/fw_cfg.h"
#include "hw/mips/loongson3_bootp.h"
#include "hw/misc/unimp.h"
diff --git a/hw/mips/malta.c b/hw/mips/malta.c
index dac27fad9d..049de46a9e 100644
--- a/hw/mips/malta.c
+++ b/hw/mips/malta.c
@@ -37,7 +37,6 @@
#include "hw/block/flash.h"
#include "hw/mips/mips.h"
#include "hw/mips/bootloader.h"
-#include "hw/mips/cpudevs.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "qemu/log.h"
@@ -206,7 +205,7 @@ static eeprom24c0x_t spd_eeprom = {
static void generate_eeprom_spd(uint8_t *eeprom, ram_addr_t ram_size)
{
- enum { SDR = 0x4, DDR2 = 0x8 } type;
+ enum sdram_type type;
uint8_t *spd = spd_eeprom.contents;
uint8_t nbanks = 0;
uint16_t density = 0;
@@ -1238,8 +1237,9 @@ void mips_malta_init(MachineState *machine)
pci_bus_map_irqs(pci_bus, malta_pci_slot_get_pirq);
/* Southbridge */
- piix4 = pci_create_simple_multifunction(pci_bus, PIIX4_PCI_DEVFN,
- TYPE_PIIX4_PCI_DEVICE);
+ piix4 = pci_new_multifunction(PIIX4_PCI_DEVFN, TYPE_PIIX4_PCI_DEVICE);
+ qdev_prop_set_uint32(DEVICE(piix4), "smb_io_base", 0x1100);
+ pci_realize_and_unref(piix4, pci_bus, &error_fatal);
isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix4), "isa.0"));
dev = DEVICE(object_resolve_path_component(OBJECT(piix4), "ide"));
diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c
index 73437cd90f..6c32e466a3 100644
--- a/hw/mips/mips_int.c
+++ b/hw/mips/mips_int.c
@@ -23,7 +23,6 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/irq.h"
-#include "hw/mips/cpudevs.h"
#include "sysemu/kvm.h"
#include "kvm_mips.h"
diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c
index 2f951f7fc6..4f743f37eb 100644
--- a/hw/mips/mipssim.c
+++ b/hw/mips/mipssim.c
@@ -30,7 +30,6 @@
#include "qemu/datadir.h"
#include "hw/clock.h"
#include "hw/mips/mips.h"
-#include "hw/mips/cpudevs.h"
#include "hw/char/serial.h"
#include "hw/isa/isa.h"
#include "net/net.h"
diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c
index 6944f84455..3d81ddb2e1 100644
--- a/hw/misc/allwinner-r40-dramc.c
+++ b/hw/misc/allwinner-r40-dramc.c
@@ -421,19 +421,23 @@ static void allwinner_r40_dramc_realize(DeviceState *dev, Error **errp)
exit(1);
}
- /* detect_cells */
- sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s), 3, s->ram_addr, 10);
+ /* The R40 supports up to 2 GiB of DRAM, but we only model up to 1 GiB for now. */
+ memory_region_init_io(&s->detect_cells, OBJECT(s),
+ &allwinner_r40_detect_ops, s,
+ "DRAMCELLS", 1 * GiB);
+ memory_region_add_subregion_overlap(get_system_memory(), s->ram_addr,
+ &s->detect_cells, 10);
memory_region_set_enabled(&s->detect_cells, false);
/*
* We only support DRAM size up to 1G now, so prepare a high memory page
- * after 1G for dualrank detect. index = 4
+ * after 1G for dual-rank detection.
*/
memory_region_init_io(&s->dram_high, OBJECT(s),
&allwinner_r40_dualrank_detect_ops, s,
"DRAMHIGH", KiB);
- sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->dram_high);
- sysbus_mmio_map(SYS_BUS_DEVICE(s), 4, s->ram_addr + GiB);
+ memory_region_add_subregion(get_system_memory(), s->ram_addr + GiB,
+ &s->dram_high);
}
static void allwinner_r40_dramc_init(Object *obj)
@@ -458,12 +462,6 @@ static void allwinner_r40_dramc_init(Object *obj)
&allwinner_r40_dramphy_ops, s,
"DRAMPHY", 4 * KiB);
sysbus_init_mmio(sbd, &s->dramphy_iomem);
-
- /* R40 support max 2G memory but we only support up to 1G now. index 3 */
- memory_region_init_io(&s->detect_cells, OBJECT(s),
- &allwinner_r40_detect_ops, s,
- "DRAMCELLS", 1 * GiB);
- sysbus_init_mmio(sbd, &s->detect_cells);
}
static Property allwinner_r40_dramc_properties[] = {
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
index 4ed9faa54a..ff55a4e2cd 100644
--- a/hw/misc/bcm2835_property.c
+++ b/hw/misc/bcm2835_property.c
@@ -12,7 +12,7 @@
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/misc/bcm2835_mbox_defs.h"
-#include "hw/misc/raspberrypi-fw-defs.h"
+#include "hw/arm/raspberrypi-fw-defs.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index d66d912172..0447888029 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -903,8 +903,7 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
if (!ivshmem_is_master(s)) {
error_setg(&s->migration_blocker,
"Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
- if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
- error_free(s->migration_blocker);
+ if (migrate_add_blocker(&s->migration_blocker, errp) < 0) {
return;
}
}
@@ -922,10 +921,7 @@ static void ivshmem_exit(PCIDevice *dev)
IVShmemState *s = IVSHMEM_COMMON(dev);
int i;
- if (s->migration_blocker) {
- migrate_del_blocker(s->migration_blocker);
- error_free(s->migration_blocker);
- }
+ migrate_del_blocker(&s->migration_blocker);
if (memory_region_is_mapped(s->ivshmem_bar2)) {
if (!s->hostmem) {
diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c
index 0eda302db4..5a83ccc4e8 100644
--- a/hw/misc/mips_itu.c
+++ b/hw/misc/mips_itu.c
@@ -532,7 +532,7 @@ static void mips_itu_realize(DeviceState *dev, Error **errp)
return;
}
- env = &s->cpu0->env;
+ env = &MIPS_CPU(s->cpu0)->env;
if (env->saarp) {
s->saar = env->CP0_SAAR;
}
@@ -563,7 +563,7 @@ static Property mips_itu_properties[] = {
ITC_FIFO_NUM_MAX),
DEFINE_PROP_UINT32("num-semaphores", MIPSITUState, num_semaphores,
ITC_SEMAPH_NUM_MAX),
- DEFINE_PROP_LINK("cpu[0]", MIPSITUState, cpu0, TYPE_MIPS_CPU, MIPSCPU *),
+ DEFINE_PROP_LINK("cpu[0]", MIPSITUState, cpu0, TYPE_MIPS_CPU, ArchCPU *),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/misc/pci-testdev.c b/hw/misc/pci-testdev.c
index 49303134e4..acedd0f82b 100644
--- a/hw/misc/pci-testdev.c
+++ b/hw/misc/pci-testdev.c
@@ -245,7 +245,6 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp)
uint8_t *pci_conf;
char *name;
int r, i;
- bool fastmmio = kvm_ioeventfd_any_length_enabled();
pci_conf = pci_dev->config;
@@ -279,7 +278,7 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp)
g_free(name);
test->hdr->offset = cpu_to_le32(IOTEST_SIZE(i) + i * IOTEST_ACCESS_WIDTH);
test->match_data = strcmp(IOTEST_TEST(i), "wildcard-eventfd");
- if (fastmmio && IOTEST_IS_MEM(i) && !test->match_data) {
+ if (IOTEST_IS_MEM(i) && !test->match_data) {
test->size = 0;
} else {
test->size = IOTEST_ACCESS_WIDTH;
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index f445d8bb5e..37e209cda6 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -1654,11 +1654,6 @@ static void gem_init(Object *obj)
"enet", sizeof(s->regs));
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
-
- object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
- (Object **)&s->dma_mr,
- qdev_prop_allow_set_link_before_realize,
- OBJ_PROP_LINK_STRONG);
}
static const VMStateDescription vmstate_cadence_gem = {
@@ -1691,6 +1686,8 @@ static Property gem_properties[] = {
num_type2_screeners, 4),
DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState,
jumbo_max_len, 10240),
+ DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
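[Editor's note] The cadence_gem change swaps a hand-rolled
object_property_add_link() in instance_init for a declarative DEFINE_PROP_LINK
entry in the device's property table; both expose a "dma" link of type
memory-region. A sketch of the declarative form plus how a board would use it
(the set_link call is illustrative, not part of the hunk):

    static Property gem_properties[] = {
        /* Link property: board-supplied MemoryRegion used for DMA. */
        DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr,
                         TYPE_MEMORY_REGION, MemoryRegion *),
        DEFINE_PROP_END_OF_LIST(),
    };

    /* Board code, before realizing the device: */
    object_property_set_link(OBJECT(gem), "dma", OBJECT(mr), &error_abort);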
diff --git a/hw/net/tulip.c b/hw/net/tulip.c
index 915e5fb595..11d866e431 100644
--- a/hw/net/tulip.c
+++ b/hw/net/tulip.c
@@ -1020,7 +1020,7 @@ static void tulip_class_init(ObjectClass *klass, void *data)
k->exit = pci_tulip_exit;
k->vendor_id = PCI_VENDOR_ID_DEC;
k->device_id = PCI_DEVICE_ID_DEC_21143;
- k->subsystem_vendor_id = 0x103c;
+ k->subsystem_vendor_id = PCI_VENDOR_ID_HP;
k->subsystem_id = 0x104f;
k->class_id = PCI_CLASS_NETWORK_ETHERNET;
dc->vmsd = &vmstate_pci_tulip;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 29e33ea5ed..b85c7946a7 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3624,8 +3624,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->primary_listener.hide_device = failover_hide_primary_device;
qatomic_set(&n->failover_primary_hidden, true);
device_listener_register(&n->primary_listener);
- n->migration_state.notify = virtio_net_migration_state_notifier;
- add_migration_state_change_notifier(&n->migration_state);
+ migration_add_notifier(&n->migration_state,
+ virtio_net_migration_state_notifier);
n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
}
@@ -3788,7 +3788,7 @@ static void virtio_net_device_unrealize(DeviceState *dev)
if (n->failover) {
qobject_unref(n->primary_opts);
device_listener_unregister(&n->primary_listener);
- remove_migration_state_change_notifier(&n->migration_state);
+ migration_remove_notifier(&n->migration_state);
} else {
assert(n->primary_opts == NULL);
}
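
The virtio-net hunks above move to the newer notifier helpers, which take the callback as an argument instead of having the caller fill in Notifier::notify by hand. A hedged sketch of the same pattern in another device (the mydev_* names are hypothetical):

    static Notifier mig_state;

    static void mydev_migration_state_notify(Notifier *notifier, void *data)
    {
        MigrationState *s = data;

        /* react to migration state transitions here */
    }

    /* in realize: */
    migration_add_notifier(&mig_state, mydev_migration_state_notify);

    /* in unrealize: */
    migration_remove_notifier(&mig_state);
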
diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c
index c6b484cc85..e18e7770e1 100644
--- a/hw/nvram/xlnx-bbram.c
+++ b/hw/nvram/xlnx-bbram.c
@@ -2,6 +2,7 @@
* QEMU model of the Xilinx BBRAM Battery Backed RAM
*
* Copyright (c) 2014-2021 Xilinx Inc.
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -416,9 +417,9 @@ static RegisterAccessInfo bbram_ctrl_regs_info[] = {
}
};
-static void bbram_ctrl_reset(DeviceState *dev)
+static void bbram_ctrl_reset_hold(Object *obj)
{
- XlnxBBRam *s = XLNX_BBRAM(dev);
+ XlnxBBRam *s = XLNX_BBRAM(obj);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
@@ -522,8 +523,9 @@ static Property bbram_ctrl_props[] = {
static void bbram_ctrl_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
- dc->reset = bbram_ctrl_reset;
+ rc->phases.hold = bbram_ctrl_reset_hold;
dc->realize = bbram_ctrl_realize;
dc->vmsd = &vmstate_bbram_ctrl;
device_class_set_props(dc, bbram_ctrl_props);
diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c
index b35ba65ab5..beb5661c35 100644
--- a/hw/nvram/xlnx-versal-efuse-ctrl.c
+++ b/hw/nvram/xlnx-versal-efuse-ctrl.c
@@ -2,6 +2,7 @@
* QEMU model of the Versal eFuse controller
*
* Copyright (c) 2020 Xilinx Inc.
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -657,9 +658,9 @@ static void efuse_ctrl_register_reset(RegisterInfo *reg)
register_reset(reg);
}
-static void efuse_ctrl_reset(DeviceState *dev)
+static void efuse_ctrl_reset_hold(Object *obj)
{
- XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev);
+ XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
@@ -749,8 +750,9 @@ static Property efuse_ctrl_props[] = {
static void efuse_ctrl_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
- dc->reset = efuse_ctrl_reset;
+ rc->phases.hold = efuse_ctrl_reset_hold;
dc->realize = efuse_ctrl_realize;
dc->vmsd = &vmstate_efuse_ctrl;
device_class_set_props(dc, efuse_ctrl_props);
diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c
index 228ba0bbfa..3db5f98ec1 100644
--- a/hw/nvram/xlnx-zynqmp-efuse.c
+++ b/hw/nvram/xlnx-zynqmp-efuse.c
@@ -2,6 +2,7 @@
* QEMU model of the ZynqMP eFuse
*
* Copyright (c) 2015 Xilinx Inc.
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
*
* Written by Edgar E. Iglesias <edgari@xilinx.com>
*
@@ -769,9 +770,9 @@ static void zynqmp_efuse_register_reset(RegisterInfo *reg)
register_reset(reg);
}
-static void zynqmp_efuse_reset(DeviceState *dev)
+static void zynqmp_efuse_reset_hold(Object *obj)
{
- XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev);
+ XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
@@ -837,8 +838,9 @@ static Property zynqmp_efuse_props[] = {
static void zynqmp_efuse_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
- dc->reset = zynqmp_efuse_reset;
+ rc->phases.hold = zynqmp_efuse_reset_hold;
dc->realize = zynqmp_efuse_realize;
dc->vmsd = &vmstate_efuse;
device_class_set_props(dc, zynqmp_efuse_props);
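
The three Xilinx nvram devices above all apply the same conversion from the legacy DeviceClass::reset hook to the Resettable hold phase. A condensed sketch of the pattern (MYDEV and the mydev_* names are placeholders):

    static void mydev_reset_hold(Object *obj)
    {
        MyDevState *s = MYDEV(obj);

        /* put the device registers into their reset state */
    }

    static void mydev_class_init(ObjectClass *klass, void *data)
    {
        ResettableClass *rc = RESETTABLE_CLASS(klass);

        rc->phases.hold = mydev_reset_hold;
    }
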
diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig
index a07070eddf..54a609d2ca 100644
--- a/hw/pci-host/Kconfig
+++ b/hw/pci-host/Kconfig
@@ -82,6 +82,10 @@ config DINO
bool
select PCI
+config ASTRO
+ bool
+ select PCI
+
config GT64120
bool
select PCI
diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c
new file mode 100644
index 0000000000..4b2d7caf2d
--- /dev/null
+++ b/hw/pci-host/astro.c
@@ -0,0 +1,885 @@
+/*
+ * HP-PARISC Astro/Pluto/Ike/REO system bus adapter (SBA)
+ * with Elroy PCI bus (LBA) adapter emulation
+ * Found in C3000 and similar machines
+ *
+ * (C) 2023 by Helge Deller <deller@gmx.de>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * Chip documentation is available at:
+ * https://parisc.wiki.kernel.org/index.php/Technical_Documentation
+ *
+ * TODO:
+ * - All user-added devices are currently attached to the first
+ *   Elroy (PCI bus) only for now. To fix this, additional work in
+ * SeaBIOS and this driver is needed. See "user_creatable" flag below.
+ * - GMMIO (Greater than 4 GB MMIO) register
+ */
+
+#define TYPE_ASTRO_IOMMU_MEMORY_REGION "astro-iommu-memory-region"
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/pci/pci_device.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/qdev-properties.h"
+#include "hw/pci-host/astro.h"
+#include "hw/hppa/hppa_hardware.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+#include "qom/object.h"
+
+/*
+ * Helper functions
+ */
+
+static uint64_t mask_32bit_val(hwaddr addr, unsigned size, uint64_t val)
+{
+ if (size == 8) {
+ return val;
+ }
+ if (addr & 4) {
+ val >>= 32;
+ } else {
+ val = (uint32_t) val;
+ }
+ return val;
+}
+
+static void put_val_in_int64(uint64_t *p, hwaddr addr, unsigned size,
+ uint64_t val)
+{
+ if (size == 8) {
+ *p = val;
+ } else if (size == 4) {
+ if (addr & 4) {
+ *p = ((*p << 32) >> 32) | (val << 32);
+ } else {
+ *p = ((*p >> 32) << 32) | (uint32_t) val;
+ }
+ }
+}
+
+static void put_val_in_array(uint64_t *array, hwaddr start_addr,
+ hwaddr addr, unsigned size, uint64_t val)
+{
+ int index;
+
+ index = (addr - start_addr) / 8;
+ put_val_in_int64(&array[index], addr, size, val);
+}
+
+
+/*
+ * The Elroy PCI host bridge. We have at least 4 of those under Astro.
+ */
+
+static MemTxResult elroy_chip_read_with_attrs(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ MemTxResult ret = MEMTX_OK;
+ ElroyState *s = opaque;
+ uint64_t val = -1;
+ int index;
+
+ switch ((addr >> 3) << 3) {
+ case 0x0008:
+ val = 0x6000005; /* func_class */
+ break;
+ case 0x0058:
+ /*
+ * Scratch register, but firmware initializes it with the
+ * PCI bus number, which Linux/HP-UX then use.
+ */
+ val = s->pci_bus_num;
+ /* Upper byte holds the end of this bus number */
+ val |= s->pci_bus_num << 8;
+ break;
+ case 0x0080:
+ val = s->arb_mask; /* set ARB mask */
+ break;
+ case 0x0108:
+ val = s->status_control;
+ break;
+ case 0x200 ... 0x250 - 1: /* LMMIO, GMMIO, WLMMIO, WGMMIO, ... */
+ index = (addr - 0x200) / 8;
+ val = s->mmio_base[index];
+ break;
+ case 0x0680:
+ val = s->error_config;
+ break;
+ case 0x0688:
+ val = 0; /* ERROR_STATUS */
+ break;
+ case 0x0800: /* IOSAPIC_REG_SELECT */
+ val = s->iosapic_reg_select;
+ break;
+ case 0x0808:
+ val = UINT64_MAX; /* XXX: tbc. */
+ g_assert_not_reached();
+ break;
+ case 0x0810: /* IOSAPIC_REG_WINDOW */
+ switch (s->iosapic_reg_select) {
+ case 0x01: /* IOSAPIC_REG_VERSION */
+ val = (32 << 16) | 1; /* upper 16bit holds max entries */
+ break;
+ default:
+ if (s->iosapic_reg_select < ARRAY_SIZE(s->iosapic_reg)) {
+ val = s->iosapic_reg[s->iosapic_reg_select];
+ } else {
+ trace_iosapic_reg_read(s->iosapic_reg_select, size, val);
+ g_assert_not_reached();
+ }
+ }
+ trace_iosapic_reg_read(s->iosapic_reg_select, size, val);
+ break;
+ default:
+ trace_elroy_read(addr, size, val);
+ g_assert_not_reached();
+ }
+ trace_elroy_read(addr, size, val);
+
+ /* for 32-bit accesses mask return value */
+ val = mask_32bit_val(addr, size, val);
+
+ trace_astro_chip_read(addr, size, val);
+ *data = val;
+ return ret;
+}
+
+
+static MemTxResult elroy_chip_write_with_attrs(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size,
+ MemTxAttrs attrs)
+{
+ ElroyState *s = opaque;
+ int i;
+
+ trace_elroy_write(addr, size, val);
+
+ switch ((addr >> 3) << 3) {
+ case 0x080:
+ put_val_in_int64(&s->arb_mask, addr, size, val);
+ break;
+ case 0x0108:
+ put_val_in_int64(&s->status_control, addr, size, val);
+ break;
+ case 0x200 ... 0x250 - 1: /* LMMIO, GMMIO, WLMMIO, WGMMIO, ... */
+ put_val_in_array(s->mmio_base, 0x200, addr, size, val);
+ break;
+ case 0x0680:
+ put_val_in_int64(&s->error_config, addr, size, val);
+ break;
+ case 0x0800: /* IOSAPIC_REG_SELECT */
+ s->iosapic_reg_select = val;
+ break;
+ case 0x0810: /* IOSAPIC_REG_WINDOW */
+ trace_iosapic_reg_write(s->iosapic_reg_select, size, val);
+ if (s->iosapic_reg_select < ARRAY_SIZE(s->iosapic_reg)) {
+ s->iosapic_reg[s->iosapic_reg_select] = val;
+ } else {
+ g_assert_not_reached();
+ }
+ break;
+ case 0x0840: /* IOSAPIC_REG_EOI */
+ val = le64_to_cpu(val);
+ val &= 63;
+ for (i = 0; i < ELROY_IRQS; i++) {
+ if ((s->iosapic_reg[0x10 + 2 * i] & 63) == val) {
+ s->ilr &= ~(1ull << i);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps elroy_chip_ops = {
+ .read_with_attrs = elroy_chip_read_with_attrs,
+ .write_with_attrs = elroy_chip_write_with_attrs,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+};
+
+
+/*
+ * Unlike pci_config_data_le_ops, this does not check whether the
+ * high bit is set in config_reg.
+ */
+
+static uint64_t elroy_config_data_read(void *opaque, hwaddr addr, unsigned len)
+{
+ uint64_t val;
+
+ PCIHostState *s = opaque;
+ val = pci_data_read(s->bus, s->config_reg | (addr & 3), len);
+ trace_elroy_pci_config_data_read(s->config_reg | (addr & 3), len, val);
+ return val;
+}
+
+static void elroy_config_data_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned len)
+{
+ PCIHostState *s = opaque;
+ pci_data_write(s->bus, s->config_reg | (addr & 3), val, len);
+ trace_elroy_pci_config_data_write(s->config_reg | (addr & 3), len, val);
+}
+
+static const MemoryRegionOps elroy_config_data_ops = {
+ .read = elroy_config_data_read,
+ .write = elroy_config_data_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t elroy_config_addr_read(void *opaque, hwaddr addr, unsigned len)
+{
+ ElroyState *s = opaque;
+ return s->config_reg_elroy;
+}
+
+static void elroy_config_addr_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned len)
+{
+ PCIHostState *s = opaque;
+ ElroyState *es = opaque;
+ es->config_reg_elroy = val; /* keep a copy of original value */
+ s->config_reg = val;
+}
+
+static const MemoryRegionOps elroy_config_addr_ops = {
+ .read = elroy_config_addr_read,
+ .write = elroy_config_addr_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 8,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+
+/*
+ * A subroutine of astro_translate_iommu that builds an IOMMUTLBEntry using the
+ * given translated address and mask.
+ */
+static bool make_iommu_tlbe(hwaddr addr, hwaddr taddr, hwaddr mask,
+ IOMMUTLBEntry *ret)
+{
+ hwaddr tce_mask = ~((1ull << 12) - 1);
+ ret->target_as = &address_space_memory;
+ ret->iova = addr & tce_mask;
+ ret->translated_addr = taddr & tce_mask;
+ ret->addr_mask = ~tce_mask;
+ ret->perm = IOMMU_RW;
+ return true;
+}
+
+/* Handle PCI-to-system address translation. */
+static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
+ hwaddr addr,
+ IOMMUAccessFlags flag,
+ int iommu_idx)
+{
+ AstroState *s = container_of(iommu, AstroState, iommu);
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = 0,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+ hwaddr pdir_ptr, index, a, ibase;
+ hwaddr addr_mask = 0xfff; /* 4k translation */
+ uint64_t entry;
+
+#define IOVP_SHIFT 12 /* equals PAGE_SHIFT */
+#define PDIR_INDEX(iovp) ((iovp) >> IOVP_SHIFT)
+#define IOVP_MASK PAGE_MASK
+#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
+
+ /* "range enable" flag cleared? */
+ if ((s->tlb_ibase & 1) == 0) {
+ make_iommu_tlbe(addr, addr, addr_mask, &ret);
+ return ret;
+ }
+
+ a = addr;
+ ibase = s->tlb_ibase & ~1ULL;
+ if ((a & s->tlb_imask) != ibase) {
+ /* do not translate this one! */
+ make_iommu_tlbe(addr, addr, addr_mask, &ret);
+ return ret;
+ }
+ index = PDIR_INDEX(a);
+ pdir_ptr = s->tlb_pdir_base + index * sizeof(entry);
+ entry = ldq_le_phys(&address_space_memory, pdir_ptr);
+ if (!(entry & SBA_PDIR_VALID_BIT)) { /* I/O PDIR entry valid ? */
+ g_assert_not_reached();
+ goto failure;
+ }
+ entry &= ~SBA_PDIR_VALID_BIT;
+ entry >>= IOVP_SHIFT;
+ entry <<= 12;
+ entry |= addr & 0xfff;
+ make_iommu_tlbe(addr, entry, addr_mask, &ret);
+ goto success;
+
+ failure:
+ ret = (IOMMUTLBEntry) { .perm = IOMMU_NONE };
+ success:
+ return ret;
+}
+
+static AddressSpace *elroy_pcihost_set_iommu(PCIBus *bus, void *opaque,
+ int devfn)
+{
+ ElroyState *s = opaque;
+ return &s->astro->iommu_as;
+}
+
+/*
+ * Encoding in IOSAPIC:
+ * base_addr == 0xfffa0000, we want to get 0xa0ff0000.
+ * eid 0x0ff00000 -> 0x00ff0000
+ * id 0x000ff000 -> 0xff000000
+ */
+#define SWIZZLE_HPA(a) \
+ ((((a) & 0x0ff00000) >> 4) | (((a) & 0x000ff000) << 12))
+#define UNSWIZZLE_HPA(a) \
+ (((((a) << 4) & 0x0ff00000) | (((a) >> 12) & 0x000ff000) | 0xf0000000))
+
+/* bits in the "low" I/O Sapic IRdT entry */
+#define IOSAPIC_IRDT_DISABLE 0x10000 /* if bit is set, mask this irq */
+#define IOSAPIC_IRDT_PO_LOW 0x02000
+#define IOSAPIC_IRDT_LEVEL_TRIG 0x08000
+#define IOSAPIC_IRDT_MODE_LPRI 0x00100
+
+#define CPU_IRQ_OFFSET 2
+
+static void elroy_set_irq(void *opaque, int irq, int level)
+{
+ ElroyState *s = opaque;
+ uint32_t bit;
+ uint32_t old_ilr = s->ilr;
+ hwaddr cpu_hpa;
+ uint32_t val;
+
+ val = s->iosapic_reg[0x10 + 2 * irq];
+ cpu_hpa = s->iosapic_reg[0x11 + 2 * irq];
+ /* low nibble of val has value to write into CPU irq reg */
+ bit = 1u << (val & (ELROY_IRQS - 1));
+ cpu_hpa = UNSWIZZLE_HPA(cpu_hpa);
+
+ if (level && (!(val & IOSAPIC_IRDT_DISABLE)) && cpu_hpa) {
+ uint32_t ena = bit & ~old_ilr;
+ s->ilr = old_ilr | bit;
+ if (ena != 0) {
+ stl_be_phys(&address_space_memory, cpu_hpa, val & 63);
+ }
+ } else {
+ s->ilr = old_ilr & ~bit;
+ }
+}
+
+static int elroy_pci_map_irq(PCIDevice *d, int irq_num)
+{
+ int slot = PCI_SLOT(d->devfn);
+
+ assert(irq_num >= 0 && irq_num < ELROY_IRQS);
+ return slot & (ELROY_IRQS - 1);
+}
+
+static void elroy_reset(DeviceState *dev)
+{
+ ElroyState *s = ELROY_PCI_HOST_BRIDGE(dev);
+ int irq;
+
+ /*
+ * Make sure to disable interrupts at reboot, otherwise the Linux kernel
+ * serial8250_config_port() in drivers/tty/serial/8250/8250_port.c
+ * will hang during autoconfig().
+ */
+ s->ilr = 0;
+ for (irq = 0; irq < ELROY_IRQS; irq++) {
+ s->iosapic_reg[0x10 + 2 * irq] = IOSAPIC_IRDT_PO_LOW |
+ IOSAPIC_IRDT_LEVEL_TRIG | (irq + CPU_IRQ_OFFSET) |
+ IOSAPIC_IRDT_DISABLE;
+ s->iosapic_reg[0x11 + 2 * irq] = SWIZZLE_HPA(CPU_HPA);
+ }
+}
+
+static void elroy_pcihost_init(Object *obj)
+{
+ ElroyState *s = ELROY_PCI_HOST_BRIDGE(obj);
+ PCIHostState *phb = PCI_HOST_BRIDGE(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ /* Elroy config access from CPU. */
+ memory_region_init_io(&s->this_mem, OBJECT(s), &elroy_chip_ops,
+ s, "elroy", 0x2000);
+
+ /* Elroy PCI config. */
+ memory_region_init_io(&phb->conf_mem, OBJECT(phb),
+ &elroy_config_addr_ops, DEVICE(s),
+ "pci-conf-idx", 8);
+ memory_region_init_io(&phb->data_mem, OBJECT(phb),
+ &elroy_config_data_ops, DEVICE(s),
+ "pci-conf-data", 8);
+ memory_region_add_subregion(&s->this_mem, 0x40,
+ &phb->conf_mem);
+ memory_region_add_subregion(&s->this_mem, 0x48,
+ &phb->data_mem);
+
+ /* Elroy PCI bus memory. */
+ memory_region_init(&s->pci_mmio, OBJECT(s), "pci-mmio", UINT64_MAX);
+ memory_region_init_io(&s->pci_io, OBJECT(s), &unassigned_io_ops, obj,
+ "pci-isa-mmio",
+ ((uint32_t) IOS_DIST_BASE_SIZE) / ROPES_PER_IOC);
+
+ phb->bus = pci_register_root_bus(DEVICE(s), "pci",
+ elroy_set_irq, elroy_pci_map_irq, s,
+ &s->pci_mmio, &s->pci_io,
+ PCI_DEVFN(0, 0), ELROY_IRQS, TYPE_PCI_BUS);
+
+ sysbus_init_mmio(sbd, &s->this_mem);
+
+ qdev_init_gpio_in(DEVICE(obj), elroy_set_irq, ELROY_IRQS);
+}
+
+static Property elroy_pcihost_properties[] = {
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_elroy = {
+ .name = "Elroy",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(hpa, ElroyState),
+ VMSTATE_UINT32(pci_bus_num, ElroyState),
+ VMSTATE_UINT64(config_address, ElroyState),
+ VMSTATE_UINT64(config_reg_elroy, ElroyState),
+ VMSTATE_UINT64(status_control, ElroyState),
+ VMSTATE_UINT64(arb_mask, ElroyState),
+ VMSTATE_UINT64_ARRAY(mmio_base, ElroyState, (0x0250 - 0x200) / 8),
+ VMSTATE_UINT64(error_config, ElroyState),
+ VMSTATE_UINT32(iosapic_reg_select, ElroyState),
+ VMSTATE_UINT64_ARRAY(iosapic_reg, ElroyState, 0x20),
+ VMSTATE_UINT32(ilr, ElroyState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void elroy_pcihost_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = elroy_reset;
+ device_class_set_props(dc, elroy_pcihost_properties);
+ dc->vmsd = &vmstate_elroy;
+ dc->user_creatable = false;
+}
+
+static const TypeInfo elroy_pcihost_info = {
+ .name = TYPE_ELROY_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_init = elroy_pcihost_init,
+ .instance_size = sizeof(ElroyState),
+ .class_init = elroy_pcihost_class_init,
+};
+
+static void elroy_register_types(void)
+{
+ type_register_static(&elroy_pcihost_info);
+}
+
+type_init(elroy_register_types)
+
+
+static ElroyState *elroy_init(int num)
+{
+ DeviceState *dev;
+
+ dev = qdev_new(TYPE_ELROY_PCI_HOST_BRIDGE);
+ dev->id = g_strdup_printf("elroy%d", num);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ return ELROY_PCI_HOST_BRIDGE(dev);
+}
+
+/*
+ * Astro Runway chip.
+ */
+
+static MemTxResult astro_chip_read_with_attrs(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ AstroState *s = opaque;
+ MemTxResult ret = MEMTX_OK;
+ uint64_t val = -1;
+ int index;
+
+ switch ((addr >> 3) << 3) {
+ /* R2I registers */
+ case 0x0000: /* ID */
+ val = (0x01 << 3) | 0x01ULL;
+ break;
+ case 0x0008: /* IOC_CTRL */
+ val = s->ioc_ctrl;
+ break;
+ case 0x0010: /* TOC_CLIENT_ID */
+ break;
+ case 0x0030: /* HP-UX 10.20 and 11.11 read it. No idea. */
+ val = -1;
+ break;
+ case 0x0300 ... 0x03d8: /* LMMIO_DIRECT0_BASE... */
+ index = (addr - 0x300) / 8;
+ val = s->ioc_ranges[index];
+ break;
+ case 0x10200:
+ val = 0;
+ break;
+ case 0x10220:
+ case 0x10230: /* HP-UX 11.11 reads it. No idea. */
+ val = -1;
+ break;
+ case 0x22108: /* IOC STATUS_CONTROL */
+ val = s->ioc_status_ctrl;
+ break;
+ case 0x20200 ... 0x20240 - 1: /* IOC Rope0_Control ... */
+ index = (addr - 0x20200) / 8;
+ val = s->ioc_rope_control[index];
+ break;
+ case 0x20040: /* IOC Rope config */
+ val = s->ioc_rope_config;
+ break;
+ case 0x20050: /* IOC Rope debug */
+ val = 0;
+ break;
+ case 0x20108: /* IOC STATUS_CONTROL */
+ val = s->ioc_status_control;
+ break;
+ case 0x20310: /* IOC_PCOM */
+ val = s->tlb_pcom;
+ /* TODO: flush iommu */
+ break;
+ case 0x20400:
+ val = s->ioc_flush_control;
+ break;
+ /* empty placeholders for non-existent elroys */
+#define EMPTY_PORT(x) case x: case x+8: val = 0; break; \
+                case x+0x40: case x+0x48: val = UINT64_MAX; break;
+ EMPTY_PORT(0x30000)
+ EMPTY_PORT(0x32000)
+ EMPTY_PORT(0x34000)
+ EMPTY_PORT(0x36000)
+ EMPTY_PORT(0x38000)
+ EMPTY_PORT(0x3a000)
+ EMPTY_PORT(0x3c000)
+ EMPTY_PORT(0x3e000)
+#undef EMPTY_PORT
+
+ default:
+ trace_astro_chip_read(addr, size, val);
+ g_assert_not_reached();
+ }
+
+ /* for 32-bit accesses mask return value */
+ val = mask_32bit_val(addr, size, val);
+
+ trace_astro_chip_read(addr, size, val);
+ *data = val;
+ return ret;
+}
+
+static MemTxResult astro_chip_write_with_attrs(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size,
+ MemTxAttrs attrs)
+{
+ AstroState *s = opaque;
+
+ trace_astro_chip_write(addr, size, val);
+
+ switch ((addr >> 3) << 3) {
+ case 0x0000: /* ID */
+ break;
+ case 0x0008: /* IOC_CTRL */
+ val &= 0x0ffffff;
+ put_val_in_int64(&s->ioc_ctrl, addr, size, val);
+ break;
+ case 0x0010: /* TOC_CLIENT_ID */
+ break;
+ case 0x0030: /* HP-UX 10.20 and 11.11 read it. No idea. */
+ break;
+ case 0x0300 ... 0x03d8 - 1: /* LMMIO_DIRECT0_BASE... */
+ put_val_in_array(s->ioc_ranges, 0x300, addr, size, val);
+ break;
+ case 0x10200:
+ case 0x10220:
+ case 0x10230: /* HP-UX 11.11 reads it. No idea. */
+ break;
+ case 0x22108: /* IOC STATUS_CONTROL */
+ put_val_in_int64(&s->ioc_status_ctrl, addr, size, val);
+ break;
+ case 0x20200 ... 0x20240 - 1: /* IOC Rope0_Control ... */
+ put_val_in_array(s->ioc_rope_control, 0x20200, addr, size, val);
+ break;
+ case 0x20040: /* IOC Rope config */
+ put_val_in_int64(&s->ioc_rope_config, addr, size, val);
+ break;
+ case 0x20300:
+ put_val_in_int64(&s->tlb_ibase, addr, size, val);
+ break;
+ case 0x20308:
+ put_val_in_int64(&s->tlb_imask, addr, size, val);
+ break;
+ case 0x20310:
+ put_val_in_int64(&s->tlb_pcom, addr, size, val);
+ /* TODO: flush iommu */
+ break;
+ case 0x20318:
+ put_val_in_int64(&s->tlb_tcnfg, addr, size, val);
+ break;
+ case 0x20320:
+ put_val_in_int64(&s->tlb_pdir_base, addr, size, val);
+ break;
+ /*
+ * empty placeholders for non-existent elroys, e.g.
+ * func_class, pci config & data
+ */
+#define EMPTY_PORT(x) case x: case x+8: case x+0x40: case x+0x48:
+ EMPTY_PORT(0x30000)
+ EMPTY_PORT(0x32000)
+ EMPTY_PORT(0x34000)
+ EMPTY_PORT(0x36000)
+ EMPTY_PORT(0x38000)
+ EMPTY_PORT(0x3a000)
+ EMPTY_PORT(0x3c000)
+ EMPTY_PORT(0x3e000)
+ break;
+#undef EMPTY_PORT
+
+ default:
+ /* Controlled by astro_chip_mem_valid above. */
+ trace_astro_chip_write(addr, size, val);
+ g_assert_not_reached();
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps astro_chip_ops = {
+ .read_with_attrs = astro_chip_read_with_attrs,
+ .write_with_attrs = astro_chip_write_with_attrs,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+};
+
+static const VMStateDescription vmstate_astro = {
+ .name = "Astro",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ioc_ctrl, AstroState),
+ VMSTATE_UINT64(ioc_status_ctrl, AstroState),
+ VMSTATE_UINT64_ARRAY(ioc_ranges, AstroState, (0x03d8 - 0x300) / 8),
+ VMSTATE_UINT64(ioc_rope_config, AstroState),
+ VMSTATE_UINT64(ioc_status_control, AstroState),
+ VMSTATE_UINT64(ioc_flush_control, AstroState),
+ VMSTATE_UINT64_ARRAY(ioc_rope_control, AstroState, 8),
+ VMSTATE_UINT64(tlb_ibase, AstroState),
+ VMSTATE_UINT64(tlb_imask, AstroState),
+ VMSTATE_UINT64(tlb_pcom, AstroState),
+ VMSTATE_UINT64(tlb_tcnfg, AstroState),
+ VMSTATE_UINT64(tlb_pdir_base, AstroState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void astro_reset(DeviceState *dev)
+{
+ AstroState *s = ASTRO_CHIP(dev);
+ int i;
+
+ s->ioc_ctrl = 0x29cf;
+ s->ioc_rope_config = 0xc5f;
+ s->ioc_flush_control = 0xb03;
+ s->ioc_status_control = 0;
+ memset(&s->ioc_rope_control, 0, sizeof(s->ioc_rope_control));
+
+ /*
+ * The SBA BASE/MASK registers control CPU -> IO routing.
+ * The LBA BASE/MASK registers control IO -> System routing (in Elroy)
+ */
+ memset(&s->ioc_ranges, 0, sizeof(s->ioc_ranges));
+ s->ioc_ranges[(0x360 - 0x300) / 8] = LMMIO_DIST_BASE_ADDR | 0x01; /* LMMIO_DIST_BASE (SBA) */
+ s->ioc_ranges[(0x368 - 0x300) / 8] = 0xfc000000; /* LMMIO_DIST_MASK */
+ s->ioc_ranges[(0x370 - 0x300) / 8] = 0; /* LMMIO_DIST_ROUTE */
+ s->ioc_ranges[(0x390 - 0x300) / 8] = IOS_DIST_BASE_ADDR | 0x01; /* IOS_DIST_BASE */
+ s->ioc_ranges[(0x398 - 0x300) / 8] = 0xffffff0000; /* IOS_DIST_MASK */
+ s->ioc_ranges[(0x3a0 - 0x300) / 8] = 0x3400000000000000ULL; /* IOS_DIST_ROUTE */
+ s->ioc_ranges[(0x3c0 - 0x300) / 8] = 0xfffee00000; /* IOS_DIRECT_BASE */
+ s->ioc_ranges[(0x3c8 - 0x300) / 8] = 0xffffff0000; /* IOS_DIRECT_MASK */
+ s->ioc_ranges[(0x3d0 - 0x300) / 8] = 0x0; /* IOS_DIRECT_ROUTE */
+
+ s->tlb_ibase = 0;
+ s->tlb_imask = 0;
+ s->tlb_pcom = 0;
+ s->tlb_tcnfg = 0;
+ s->tlb_pdir_base = 0;
+
+ for (i = 0; i < ELROY_NUM; i++) {
+ elroy_reset(DEVICE(s->elroy[i]));
+ }
+}
+
+static void astro_init(Object *obj)
+{
+}
+
+static void astro_realize(DeviceState *obj, Error **errp)
+{
+ AstroState *s = ASTRO_CHIP(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ int i;
+
+ memory_region_init_io(&s->this_mem, OBJECT(s), &astro_chip_ops,
+ s, "astro", 0x40000);
+ sysbus_init_mmio(sbd, &s->this_mem);
+
+ /* Host memory as seen from the Elroys' PCI side, via the IOMMU. */
+ memory_region_init_iommu(&s->iommu, sizeof(s->iommu),
+ TYPE_ASTRO_IOMMU_MEMORY_REGION, OBJECT(s),
+ "iommu-astro", UINT64_MAX);
+ address_space_init(&s->iommu_as, MEMORY_REGION(&s->iommu),
+ "bm-pci");
+
+ /* Create Elroys (PCI host bus chips). */
+ for (i = 0; i < ELROY_NUM; i++) {
+ static const int elroy_hpa_offsets[ELROY_NUM] = {
+ 0x30000, 0x32000, 0x38000, 0x3c000 };
+ static const char elroy_rope_nr[ELROY_NUM] = {
+ 0, 1, 4, 6 }; /* busnum path, e.g. [10:6] */
+ int addr_offset;
+ ElroyState *elroy;
+ hwaddr map_addr;
+ uint64_t map_size;
+ int rope;
+
+ addr_offset = elroy_hpa_offsets[i];
+ rope = elroy_rope_nr[i];
+
+ elroy = elroy_init(i);
+ s->elroy[i] = elroy;
+ elroy->hpa = ASTRO_HPA + addr_offset;
+ elroy->pci_bus_num = i;
+ elroy->astro = s;
+
+ /*
+ * NOTE: we only allow PCI devices on the first Elroy for now.
+ * SeaBIOS will not find devices on the other buses.
+ */
+ if (i > 0) {
+ qbus_mark_full(&PCI_HOST_BRIDGE(elroy)->bus->qbus);
+ }
+
+ /* Map the Elroy config addresses into Astro space. */
+ memory_region_add_subregion(&s->this_mem, addr_offset,
+ &elroy->this_mem);
+
+ /* LMMIO */
+ elroy->mmio_base[(0x0200 - 0x200) / 8] = 0xf0000001;
+ elroy->mmio_base[(0x0208 - 0x200) / 8] = 0xf8000000;
+ /* GMMIO */
+ elroy->mmio_base[(0x0210 - 0x200) / 8] = 0x000000f800000001;
+ elroy->mmio_base[(0x0218 - 0x200) / 8] = 0x000000ff80000000;
+ /* WLMMIO */
+ elroy->mmio_base[(0x0220 - 0x200) / 8] = 0xf0000001;
+ elroy->mmio_base[(0x0228 - 0x200) / 8] = 0xf0000000;
+ /* WGMMIO */
+ elroy->mmio_base[(0x0230 - 0x200) / 8] = 0x000000f800000001;
+ elroy->mmio_base[(0x0238 - 0x200) / 8] = 0x000000fc00000000;
+ /* IOS_BASE */
+ map_size = IOS_DIST_BASE_SIZE / ROPES_PER_IOC;
+ elroy->mmio_base[(0x0240 - 0x200) / 8] = rope * map_size | 0x01;
+ elroy->mmio_base[(0x0248 - 0x200) / 8] = 0x0000e000;
+
+ /* Map the Elroy's MMIO. */
+ map_size = LMMIO_DIST_BASE_SIZE / ROPES_PER_IOC;
+ map_addr = (uint32_t) (LMMIO_DIST_BASE_ADDR + rope * map_size);
+ memory_region_init_alias(&elroy->pci_mmio_alias, OBJECT(elroy),
+ "pci-mmio-alias",
+ &elroy->pci_mmio, map_addr, map_size);
+ memory_region_add_subregion(get_system_memory(), map_addr,
+ &elroy->pci_mmio_alias);
+
+ map_size = IOS_DIST_BASE_SIZE / ROPES_PER_IOC;
+ map_addr = (uint32_t) (IOS_DIST_BASE_ADDR + rope * map_size);
+ memory_region_add_subregion(get_system_memory(), map_addr,
+ &elroy->pci_io);
+
+ /* Host memory as seen from the PCI side, via the IOMMU. */
+ pci_setup_iommu(PCI_HOST_BRIDGE(elroy)->bus, elroy_pcihost_set_iommu,
+ elroy);
+ }
+}
+
+static void astro_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = astro_reset;
+ dc->vmsd = &vmstate_astro;
+ dc->realize = astro_realize;
+ /*
+ * Astro with its Elroys is a hard-wired part of the newer PA2.0
+ * machines and cannot be created without that hardware.
+ */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo astro_chip_info = {
+ .name = TYPE_ASTRO_CHIP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = astro_init,
+ .instance_size = sizeof(AstroState),
+ .class_init = astro_class_init,
+};
+
+static void astro_iommu_memory_region_class_init(ObjectClass *klass,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = astro_translate_iommu;
+}
+
+static const TypeInfo astro_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_ASTRO_IOMMU_MEMORY_REGION,
+ .class_init = astro_iommu_memory_region_class_init,
+};
+
+
+static void astro_register_types(void)
+{
+ type_register_static(&astro_chip_info);
+ type_register_static(&astro_iommu_memory_region_info);
+}
+
+type_init(astro_register_types)
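
The 32-bit sub-access handling in the helpers at the top of astro.c can be exercised in isolation. The standalone program below copies the helper's logic (a test sketch, not part of the patch): a 4-byte write at an 8-byte-aligned offset replaces the low word of a 64-bit register, and a write at offset + 4 replaces the high word.

    #include <stdint.h>
    #include <stdio.h>

    static void put_val_in_int64(uint64_t *p, uint64_t addr, unsigned size,
                                 uint64_t val)
    {
        if (size == 8) {
            *p = val;
        } else if (size == 4) {
            if (addr & 4) {
                *p = ((*p << 32) >> 32) | (val << 32);   /* high word */
            } else {
                *p = ((*p >> 32) << 32) | (uint32_t)val; /* low word */
            }
        }
    }

    int main(void)
    {
        uint64_t reg = 0;

        put_val_in_int64(&reg, 0x200, 4, 0x11223344); /* low half */
        put_val_in_int64(&reg, 0x204, 4, 0x55667788); /* high half */
        printf("%016llx\n", (unsigned long long)reg); /* 5566778811223344 */
        return 0;
    }
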
diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c
index ee6cb85e97..bab661f3ce 100644
--- a/hw/pci-host/bonito.c
+++ b/hw/pci-host/bonito.c
@@ -654,7 +654,7 @@ static void bonito_host_realize(DeviceState *dev, Error **errp)
static void bonito_pci_realize(PCIDevice *dev, Error **errp)
{
PCIBonitoState *s = PCI_BONITO(dev);
- SysBusDevice *sysbus = SYS_BUS_DEVICE(s->pcihost);
+ MemoryRegion *host_mem = get_system_memory();
PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost);
BonitoState *bs = s->pcihost;
MemoryRegion *pcimem_alias = g_new(MemoryRegion, 1);
@@ -668,48 +668,45 @@ static void bonito_pci_realize(PCIDevice *dev, Error **errp)
/* set the north bridge register mapping */
memory_region_init_io(&s->iomem, OBJECT(s), &bonito_ops, s,
"north-bridge-register", BONITO_INTERNAL_REG_SIZE);
- sysbus_init_mmio(sysbus, &s->iomem);
- sysbus_mmio_map(sysbus, 0, BONITO_INTERNAL_REG_BASE);
+ memory_region_add_subregion(host_mem, BONITO_INTERNAL_REG_BASE, &s->iomem);
/* set the north bridge pci configure mapping */
memory_region_init_io(&phb->conf_mem, OBJECT(s), &bonito_pciconf_ops, s,
"north-bridge-pci-config", BONITO_PCICONFIG_SIZE);
- sysbus_init_mmio(sysbus, &phb->conf_mem);
- sysbus_mmio_map(sysbus, 1, BONITO_PCICONFIG_BASE);
+ memory_region_add_subregion(host_mem, BONITO_PCICONFIG_BASE,
+ &phb->conf_mem);
/* set the south bridge pci configure mapping */
memory_region_init_io(&phb->data_mem, OBJECT(s), &bonito_spciconf_ops, s,
"south-bridge-pci-config", BONITO_SPCICONFIG_SIZE);
- sysbus_init_mmio(sysbus, &phb->data_mem);
- sysbus_mmio_map(sysbus, 2, BONITO_SPCICONFIG_BASE);
+ memory_region_add_subregion(host_mem, BONITO_SPCICONFIG_BASE,
+ &phb->data_mem);
create_unimplemented_device("bonito", BONITO_REG_BASE, BONITO_REG_SIZE);
memory_region_init_io(&s->iomem_ldma, OBJECT(s), &bonito_ldma_ops, s,
"ldma", 0x100);
- sysbus_init_mmio(sysbus, &s->iomem_ldma);
- sysbus_mmio_map(sysbus, 3, 0x1fe00200);
+ memory_region_add_subregion(host_mem, 0x1fe00200, &s->iomem_ldma);
/* PCI copier */
memory_region_init_io(&s->iomem_cop, OBJECT(s), &bonito_cop_ops, s,
"cop", 0x100);
- sysbus_init_mmio(sysbus, &s->iomem_cop);
- sysbus_mmio_map(sysbus, 4, 0x1fe00300);
+ memory_region_add_subregion(host_mem, 0x1fe00300, &s->iomem_cop);
create_unimplemented_device("ROMCS", BONITO_FLASH_BASE, 60 * MiB);
/* Map PCI IO Space 0x1fd0 0000 - 0x1fd1 0000 */
memory_region_init_alias(&s->bonito_pciio, OBJECT(s), "isa_mmio",
get_system_io(), 0, BONITO_PCIIO_SIZE);
- sysbus_init_mmio(sysbus, &s->bonito_pciio);
- sysbus_mmio_map(sysbus, 5, BONITO_PCIIO_BASE);
+ memory_region_add_subregion(host_mem, BONITO_PCIIO_BASE,
+ &s->bonito_pciio);
/* add pci local io mapping */
memory_region_init_alias(&s->bonito_localio, OBJECT(s), "IOCS[0]",
get_system_io(), 0, 256 * KiB);
- sysbus_init_mmio(sysbus, &s->bonito_localio);
- sysbus_mmio_map(sysbus, 6, BONITO_DEV_BASE);
+ memory_region_add_subregion(host_mem, BONITO_DEV_BASE,
+ &s->bonito_localio);
create_unimplemented_device("IOCS[1]", BONITO_DEV_BASE + 1 * 256 * KiB,
256 * KiB);
create_unimplemented_device("IOCS[2]", BONITO_DEV_BASE + 2 * 256 * KiB,
@@ -719,8 +716,7 @@ static void bonito_pci_realize(PCIDevice *dev, Error **errp)
memory_region_init_alias(pcimem_alias, NULL, "pci.mem.alias",
&bs->pci_mem, 0, BONITO_PCIHI_SIZE);
- memory_region_add_subregion(get_system_memory(),
- BONITO_PCIHI_BASE, pcimem_alias);
+ memory_region_add_subregion(host_mem, BONITO_PCIHI_BASE, pcimem_alias);
create_unimplemented_device("PCI_2",
(hwaddr)BONITO_PCIHI_BASE + BONITO_PCIHI_SIZE,
2 * GiB);
diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build
index 64eada76fe..f891f026cb 100644
--- a/hw/pci-host/meson.build
+++ b/hw/pci-host/meson.build
@@ -27,6 +27,7 @@ pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c'))
pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c'))
# HPPA devices
+pci_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
pci_ss.add(when: 'CONFIG_DINO', if_true: files('dino.c'))
system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss)
diff --git a/hw/pci-host/sh_pci.c b/hw/pci-host/sh_pci.c
index 77e7bbc65f..4edebced5e 100644
--- a/hw/pci-host/sh_pci.c
+++ b/hw/pci-host/sh_pci.c
@@ -40,7 +40,7 @@ struct SHPCIState {
PCIHostState parent_obj;
PCIDevice *dev;
- qemu_irq irq[4];
+ qemu_irq irq[PCI_NUM_PINS];
MemoryRegion memconfig_p4;
MemoryRegion memconfig_a7;
MemoryRegion isa;
@@ -116,7 +116,7 @@ static void sh_pci_set_irq(void *opaque, int irq_num, int level)
qemu_set_irq(pic[irq_num], level);
}
-static void sh_pci_device_realize(DeviceState *dev, Error **errp)
+static void sh_pcic_host_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
SHPCIState *s = SH_PCI_HOST_BRIDGE(dev);
@@ -131,7 +131,8 @@ static void sh_pci_device_realize(DeviceState *dev, Error **errp)
s->irq,
get_system_memory(),
get_system_io(),
- PCI_DEVFN(0, 0), 4, TYPE_PCI_BUS);
+ PCI_DEVFN(0, 0), PCI_NUM_PINS,
+ TYPE_PCI_BUS);
memory_region_init_io(&s->memconfig_p4, OBJECT(s), &sh_pci_reg_ops, s,
"sh_pci", 0x224);
memory_region_init_alias(&s->memconfig_a7, OBJECT(s), "sh_pci.2",
@@ -145,19 +146,19 @@ static void sh_pci_device_realize(DeviceState *dev, Error **errp)
s->dev = pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "sh_pci_host");
}
-static void sh_pci_host_realize(PCIDevice *d, Error **errp)
+static void sh_pcic_pci_realize(PCIDevice *d, Error **errp)
{
pci_set_word(d->config + PCI_COMMAND, PCI_COMMAND_WAIT);
pci_set_word(d->config + PCI_STATUS, PCI_STATUS_CAP_LIST |
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);
}
-static void sh_pci_host_class_init(ObjectClass *klass, void *data)
+static void sh_pcic_pci_class_init(ObjectClass *klass, void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- k->realize = sh_pci_host_realize;
+ k->realize = sh_pcic_pci_realize;
k->vendor_id = PCI_VENDOR_ID_HITACHI;
k->device_id = PCI_DEVICE_ID_HITACHI_SH7751R;
/*
@@ -167,35 +168,29 @@ static void sh_pci_host_class_init(ObjectClass *klass, void *data)
dc->user_creatable = false;
}
-static const TypeInfo sh_pci_host_info = {
- .name = "sh_pci_host",
- .parent = TYPE_PCI_DEVICE,
- .instance_size = sizeof(PCIDevice),
- .class_init = sh_pci_host_class_init,
- .interfaces = (InterfaceInfo[]) {
- { INTERFACE_CONVENTIONAL_PCI_DEVICE },
- { },
- },
-};
-
-static void sh_pci_device_class_init(ObjectClass *klass, void *data)
+static void sh_pcic_host_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->realize = sh_pci_device_realize;
+ dc->realize = sh_pcic_host_realize;
}
-static const TypeInfo sh_pci_device_info = {
- .name = TYPE_SH_PCI_HOST_BRIDGE,
- .parent = TYPE_PCI_HOST_BRIDGE,
- .instance_size = sizeof(SHPCIState),
- .class_init = sh_pci_device_class_init,
+static const TypeInfo sh_pcic_types[] = {
+ {
+ .name = TYPE_SH_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(SHPCIState),
+ .class_init = sh_pcic_host_class_init,
+ }, {
+ .name = "sh_pci_host",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIDevice),
+ .class_init = sh_pcic_pci_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+ },
};
-static void sh_pci_register_types(void)
-{
- type_register_static(&sh_pci_device_info);
- type_register_static(&sh_pci_host_info);
-}
-
-type_init(sh_pci_register_types)
+DEFINE_TYPES(sh_pcic_types)
diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events
index 9d216bb89f..b2f47e6335 100644
--- a/hw/pci-host/trace-events
+++ b/hw/pci-host/trace-events
@@ -46,3 +46,14 @@ pnv_phb4_xive_notify_abt(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64"
dino_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d"
dino_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x"
dino_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x"
+
+# astro.c
+astro_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d"
+astro_chip_read(uint64_t addr, int size, uint64_t val) "addr 0x%"PRIx64" size %d val 0x%"PRIx64
+astro_chip_write(uint64_t addr, int size, uint64_t val) "addr 0x%"PRIx64" size %d val 0x%"PRIx64
+elroy_read(uint64_t addr, int size, uint64_t val) "addr 0x%"PRIx64" size %d val 0x%"PRIx64
+elroy_write(uint64_t addr, int size, uint64_t val) "addr 0x%"PRIx64" size %d val 0x%"PRIx64
+elroy_pci_config_data_read(uint64_t addr, int size, uint64_t val) "addr 0x%"PRIx64" size %d val 0x%"PRIx64
+elroy_pci_config_data_write(uint64_t addr, int size, uint64_t val) "addr 0x%"PRIx64" size %d val 0x%"PRIx64
+iosapic_reg_write(uint64_t reg_select, int size, uint64_t val) "reg_select 0x%"PRIx64" size %d val 0x%"PRIx64
+iosapic_reg_read(uint64_t reg_select, int size, uint64_t val) "reg_select 0x%"PRIx64" size %d val 0x%"PRIx64
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index b0d21bf43a..7d09e1a39d 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -500,15 +500,14 @@ bool pci_bus_bypass_iommu(PCIBus *bus)
}
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min)
{
assert(PCI_FUNC(devfn_min) == 0);
bus->devfn_min = devfn_min;
bus->slot_reserved_mask = 0x0;
- bus->address_space_mem = address_space_mem;
- bus->address_space_io = address_space_io;
+ bus->address_space_mem = mem;
+ bus->address_space_io = io;
bus->flags |= PCI_BUS_IS_ROOT;
/* host bridge */
@@ -529,25 +528,21 @@ bool pci_bus_is_express(const PCIBus *bus)
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, const char *typename)
{
qbus_init(bus, bus_size, typename, parent, name);
- pci_root_bus_internal_init(bus, parent, address_space_mem,
- address_space_io, devfn_min);
+ pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, const char *typename)
{
PCIBus *bus;
bus = PCI_BUS(qbus_new(typename, parent, name));
- pci_root_bus_internal_init(bus, parent, address_space_mem,
- address_space_io, devfn_min);
+ pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
return bus;
}
@@ -586,15 +581,13 @@ void pci_bus_irqs_cleanup(PCIBus *bus)
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
void *irq_opaque,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, int nirq,
const char *typename)
{
PCIBus *bus;
- bus = pci_root_bus_new(parent, name, address_space_mem,
- address_space_io, devfn_min, typename);
+ bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
pci_bus_map_irqs(bus, map_irq);
return bus;
diff --git a/hw/ppc/pef.c b/hw/ppc/pef.c
index cc44d5e339..d28ed3ba73 100644
--- a/hw/ppc/pef.c
+++ b/hw/ppc/pef.c
@@ -63,7 +63,7 @@ static int kvmppc_svm_init(ConfidentialGuestSupport *cgs, Error **errp)
/* add migration blocker */
error_setg(&pef_mig_blocker, "PEF: Migration is not implemented");
/* NB: This can fail if --only-migratable is used */
- migrate_add_blocker(pef_mig_blocker, &error_fatal);
+ migrate_add_blocker(&pef_mig_blocker, &error_fatal);
cgs->ready = true;
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index eb54f93986..c0e34fffbc 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -1217,10 +1217,9 @@ static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp)
name = g_strdup_printf("icp-%x", chip->chip_id);
memory_region_init(&chip8->icp_mmio, OBJECT(chip), name, PNV_ICP_SIZE);
- sysbus_init_mmio(SYS_BUS_DEVICE(chip), &chip8->icp_mmio);
g_free(name);
-
- sysbus_mmio_map(SYS_BUS_DEVICE(chip), 1, PNV_ICP_BASE(chip));
+ memory_region_add_subregion(get_system_memory(), PNV_ICP_BASE(chip),
+ &chip8->icp_mmio);
/* Map the ICP registers for each thread */
for (i = 0; i < chip->nr_cores; i++) {
@@ -1249,12 +1248,7 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp)
assert(chip8->xics);
/* XSCOM bridge is first */
- pnv_xscom_realize(chip, PNV_XSCOM_SIZE, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip));
+ pnv_xscom_init(chip, PNV_XSCOM_SIZE, PNV_XSCOM_BASE(chip));
pcc->parent_realize(dev, &local_err);
if (local_err) {
@@ -1512,12 +1506,7 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
Error *local_err = NULL;
/* XSCOM bridge is first */
- pnv_xscom_realize(chip, PNV9_XSCOM_SIZE, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV9_XSCOM_BASE(chip));
+ pnv_xscom_init(chip, PNV9_XSCOM_SIZE, PNV9_XSCOM_BASE(chip));
pcc->parent_realize(dev, &local_err);
if (local_err) {
@@ -1727,12 +1716,7 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
Error *local_err = NULL;
/* XSCOM bridge is first */
- pnv_xscom_realize(chip, PNV10_XSCOM_SIZE, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV10_XSCOM_BASE(chip));
+ pnv_xscom_init(chip, PNV10_XSCOM_SIZE, PNV10_XSCOM_BASE(chip));
pcc->parent_realize(dev, &local_err);
if (local_err) {
diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c
index d820e05e40..805b1d0c87 100644
--- a/hw/ppc/pnv_xscom.c
+++ b/hw/ppc/pnv_xscom.c
@@ -221,15 +221,14 @@ const MemoryRegionOps pnv_xscom_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp)
+void pnv_xscom_init(PnvChip *chip, uint64_t size, hwaddr addr)
{
- SysBusDevice *sbd = SYS_BUS_DEVICE(chip);
char *name;
name = g_strdup_printf("xscom-%x", chip->chip_id);
memory_region_init_io(&chip->xscom_mmio, OBJECT(chip), &pnv_xscom_ops,
chip, name, size);
- sysbus_init_mmio(sbd, &chip->xscom_mmio);
+ memory_region_add_subregion(get_system_memory(), addr, &chip->xscom_mmio);
memory_region_init(&chip->xscom, OBJECT(chip), name, size);
address_space_init(&chip->xscom_as, &chip->xscom, name);
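
As in the pnv.c hunks above, once the mapping address is fixed and known at init time, the sysbus_init_mmio()/sysbus_mmio_map() pair collapses into a direct subregion add. A hedged sketch of the resulting pattern (s, my_ops, base and size are placeholders):

    memory_region_init_io(&s->mmio, OBJECT(s), &my_ops, s, "my-regs", size);
    memory_region_add_subregion(get_system_memory(), base, &s->mmio);
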
diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c
index 45f409c838..a189942de4 100644
--- a/hw/ppc/ppc440_bamboo.c
+++ b/hw/ppc/ppc440_bamboo.c
@@ -24,7 +24,6 @@
#include "elf.h"
#include "hw/char/serial.h"
#include "hw/ppc/ppc.h"
-#include "ppc405.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "hw/sysbus.h"
diff --git a/hw/ppc/ppc440_uc.c b/hw/ppc/ppc440_uc.c
index 4181c843a8..7d6ca70387 100644
--- a/hw/ppc/ppc440_uc.c
+++ b/hw/ppc/ppc440_uc.c
@@ -73,46 +73,6 @@ typedef struct ppc4xx_l2sram_t {
uint32_t isram0[11];
} ppc4xx_l2sram_t;
-#ifdef MAP_L2SRAM
-static void l2sram_update_mappings(ppc4xx_l2sram_t *l2sram,
- uint32_t isarc, uint32_t isacntl,
- uint32_t dsarc, uint32_t dsacntl)
-{
- if (l2sram->isarc != isarc ||
- (l2sram->isacntl & 0x80000000) != (isacntl & 0x80000000)) {
- if (l2sram->isacntl & 0x80000000) {
- /* Unmap previously assigned memory region */
- memory_region_del_subregion(get_system_memory(),
- &l2sram->isarc_ram);
- }
- if (isacntl & 0x80000000) {
- /* Map new instruction memory region */
- memory_region_add_subregion(get_system_memory(), isarc,
- &l2sram->isarc_ram);
- }
- }
- if (l2sram->dsarc != dsarc ||
- (l2sram->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) {
- if (l2sram->dsacntl & 0x80000000) {
- /* Beware not to unmap the region we just mapped */
- if (!(isacntl & 0x80000000) || l2sram->dsarc != isarc) {
- /* Unmap previously assigned memory region */
- memory_region_del_subregion(get_system_memory(),
- &l2sram->dsarc_ram);
- }
- }
- if (dsacntl & 0x80000000) {
- /* Beware not to remap the region we just mapped */
- if (!(isacntl & 0x80000000) || dsarc != isarc) {
- /* Map new data memory region */
- memory_region_add_subregion(get_system_memory(), dsarc,
- &l2sram->dsarc_ram);
- }
- }
- }
-}
-#endif
-
static uint32_t dcr_read_l2sram(void *opaque, int dcrn)
{
ppc4xx_l2sram_t *l2sram = opaque;
@@ -193,7 +153,6 @@ static void dcr_write_l2sram(void *opaque, int dcrn, uint32_t val)
/*l2sram->isram1[dcrn - DCR_L2CACHE_BASE] = val;*/
break;
}
- /*l2sram_update_mappings(l2sram, isarc, isacntl, dsarc, dsacntl);*/
}
static void l2sram_reset(void *opaque)
@@ -203,7 +162,6 @@ static void l2sram_reset(void *opaque)
memset(l2sram->l2cache, 0, sizeof(l2sram->l2cache));
l2sram->l2cache[DCR_L2CACHE_STAT - DCR_L2CACHE_BASE] = 0x80000000;
memset(l2sram->isram0, 0, sizeof(l2sram->isram0));
- /*l2sram_update_mappings(l2sram, isarc, isacntl, dsarc, dsacntl);*/
}
void ppc4xx_l2sram_init(CPUPPCState *env)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index cb840676d3..b25093be28 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1761,7 +1761,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason)
/* Signal all vCPUs waiting on this condition */
qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond);
- migrate_del_blocker(spapr->fwnmi_migration_blocker);
+ migrate_del_blocker(&spapr->fwnmi_migration_blocker);
}
static void spapr_create_nvram(SpaprMachineState *spapr)
@@ -2937,13 +2937,6 @@ static void spapr_machine_init(MachineState *machine)
spapr_create_lmb_dr_connectors(spapr);
}
- if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) {
- /* Create the error string for live migration blocker */
- error_setg(&spapr->fwnmi_migration_blocker,
- "A machine check is being handled during migration. The handler"
- "may run and log hardware error on the destination");
- }
-
if (mc->nvdimm_supported) {
spapr_create_nvdimm_dr_connectors(spapr);
}
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 4508e40814..deb4641505 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -920,7 +920,11 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered)
* fails when running with -only-migrate. A proper interface to
* delay migration completion for a bit could avoid that.
*/
- ret = migrate_add_blocker(spapr->fwnmi_migration_blocker, NULL);
+ error_setg(&spapr->fwnmi_migration_blocker,
+ "A machine check is being handled during migration. The handler"
+ "may run and log hardware error on the destination");
+
+ ret = migrate_add_blocker(&spapr->fwnmi_migration_blocker, NULL);
if (ret == -EBUSY) {
warn_report("Received a fwnmi while migration was in progress");
}
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 7df21581c2..26c384b261 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -496,7 +496,7 @@ static void rtas_ibm_nmi_interlock(PowerPCCPU *cpu,
spapr->fwnmi_machine_check_interlock = -1;
qemu_cond_signal(&spapr->fwnmi_machine_check_interlock_cond);
rtas_st(rets, 0, RTAS_OUT_SUCCESS);
- migrate_del_blocker(spapr->fwnmi_migration_blocker);
+ migrate_del_blocker(&spapr->fwnmi_migration_blocker);
}
static struct rtas_call {
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 9d4fec2c04..f8ef2b6fa8 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -574,13 +574,14 @@ SpaprVioBus *spapr_vio_bus_init(void)
/* Create bridge device */
dev = qdev_new(TYPE_SPAPR_VIO_BRIDGE);
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
/* Create bus on bridge device */
qbus = qbus_new(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio");
bus = SPAPR_VIO_BUS(qbus);
bus->next_reg = SPAPR_VIO_REG_BASE;
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
/* hcall-vio */
spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal);
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index f2f81bd425..d02f330650 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -43,7 +43,6 @@
#include "hw/ppc/ppc.h"
#include "hw/ppc/ppc4xx.h"
#include "hw/qdev-properties.h"
-#include "ppc405.h"
#include <libfdt.h>
diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
index c6ed025982..d385d18d9c 100644
--- a/hw/rdma/vmw/pvrdma_cmd.c
+++ b/hw/rdma/vmw/pvrdma_cmd.c
@@ -129,23 +129,27 @@ static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
{
struct pvrdma_cmd_query_port *cmd = &req->query_port;
struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
- struct pvrdma_port_attr attrs = {};
+ struct ibv_port_attr attrs = {};
if (cmd->port_num > MAX_PORTS) {
return -EINVAL;
}
- if (rdma_backend_query_port(&dev->backend_dev,
- (struct ibv_port_attr *)&attrs)) {
+ if (rdma_backend_query_port(&dev->backend_dev, &attrs)) {
return -ENOMEM;
}
memset(resp, 0, sizeof(*resp));
- resp->attrs.state = dev->func0->device_active ? attrs.state :
- PVRDMA_PORT_DOWN;
- resp->attrs.max_mtu = attrs.max_mtu;
- resp->attrs.active_mtu = attrs.active_mtu;
+ /*
+ * The state, max_mtu and active_mtu fields are enums; the values
+ * for pvrdma_port_state and pvrdma_mtu match those for
+ * ibv_port_state and ibv_mtu, so we can cast them safely.
+ */
+ resp->attrs.state = dev->func0->device_active ?
+ (enum pvrdma_port_state)attrs.state : PVRDMA_PORT_DOWN;
+ resp->attrs.max_mtu = (enum pvrdma_mtu)attrs.max_mtu;
+ resp->attrs.active_mtu = (enum pvrdma_mtu)attrs.active_mtu;
resp->attrs.phys_state = attrs.phys_state;
resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
resp->attrs.max_msg_sz = 1024;
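
The casts above are only safe because the pvrdma enums mirror the ibv values one to one, as the new comment notes. A hypothetical illustration (these enums are invented for the example, not the real ibv/pvrdma definitions) of pinning that assumption down at compile time:

    enum src_mtu { SRC_MTU_1024 = 3, SRC_MTU_2048 = 4 };
    enum dst_mtu { DST_MTU_1024 = 3, DST_MTU_2048 = 4 };

    /* C11 static assertions fail the build if the values ever diverge. */
    _Static_assert(SRC_MTU_1024 == DST_MTU_1024, "MTU enums must match");
    _Static_assert(SRC_MTU_2048 == DST_MTU_2048, "MTU enums must match");

    static enum dst_mtu convert_mtu(enum src_mtu m)
    {
        return (enum dst_mtu)m; /* value-preserving cast */
    }
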
diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c
index 2052d721e5..fbc85a8d36 100644
--- a/hw/remote/proxy.c
+++ b/hw/remote/proxy.c
@@ -107,8 +107,7 @@ static void pci_proxy_dev_realize(PCIDevice *device, Error **errp)
error_setg(&dev->migration_blocker, "%s does not support migration",
TYPE_PCI_PROXY_DEV);
- if (migrate_add_blocker(dev->migration_blocker, errp) < 0) {
- error_free(dev->migration_blocker);
+ if (migrate_add_blocker(&dev->migration_blocker, errp) < 0) {
object_unref(dev->ioc);
return;
}
@@ -134,9 +133,7 @@ static void pci_proxy_dev_exit(PCIDevice *pdev)
qio_channel_close(dev->ioc, NULL);
}
- migrate_del_blocker(dev->migration_blocker);
-
- error_free(dev->migration_blocker);
+ migrate_del_blocker(&dev->migration_blocker);
proxy_memory_listener_deconfigure(&dev->proxy_listener);
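
Both hunks above adopt the pointer-to-pointer blocker API: on failure migrate_add_blocker() now frees the Error and NULLs the caller's pointer itself, which is why the explicit error_free() of the old idiom disappears. A hedged sketch of the resulting lifecycle in a device ("mydev" is a placeholder):

    static Error *migration_blocker; /* owned by the migration core once added */

    /* in realize: */
    error_setg(&migration_blocker, "mydev does not support migration");
    if (migrate_add_blocker(&migration_blocker, errp) < 0) {
        return; /* the blocker was already freed and the pointer NULLed */
    }

    /* in unrealize: */
    migrate_del_blocker(&migration_blocker); /* frees and NULLs the pointer */
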
diff --git a/hw/s390x/cpu-topology.c b/hw/s390x/cpu-topology.c
new file mode 100644
index 0000000000..f16bdf65fa
--- /dev/null
+++ b/hw/s390x/cpu-topology.c
@@ -0,0 +1,469 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU Topology
+ *
+ * Copyright IBM Corp. 2022, 2023
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ *
+ * S390 topology handling can be divided into two parts:
+ *
+ * - The first part, in this file, takes care of all common functions
+ *   used by KVM and TCG to create and modify the topology.
+ *
+ * - The second part, which builds the topology information data for
+ *   the guest with CPU- and KVM-specific handling, is implemented in
+ *   the target/s390x/kvm subtree.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/qdev-properties.h"
+#include "hw/boards.h"
+#include "target/s390x/cpu.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/s390x/cpu-topology.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-events-machine-target.h"
+
+/*
+ * s390_topology is used to keep the topology information.
+ * .cores_per_socket: tracks the number of cores per socket.
+ * .polarization: tracks machine polarization.
+ */
+S390Topology s390_topology = {
+ /* will be initialized after the CPU model is realized */
+ .cores_per_socket = NULL,
+ .polarization = S390_CPU_POLARIZATION_HORIZONTAL,
+};
+
+/**
+ * s390_socket_nb_from_ids:
+ * @drawer_id: drawer ID
+ * @book_id: book ID
+ * @socket_id: socket ID
+ *
+ * Returns the socket number used inside the cores_per_socket array
+ * for a topology tree entry.
+ */
+static int s390_socket_nb_from_ids(int drawer_id, int book_id, int socket_id)
+{
+ return (drawer_id * current_machine->smp.books + book_id) *
+ current_machine->smp.sockets + socket_id;
+}
+
+/**
+ * s390_socket_nb:
+ * @cpu: s390x CPU
+ *
+ * Returns the socket number used inside the cores_per_socket array
+ * for a cpu.
+ */
+static int s390_socket_nb(S390CPU *cpu)
+{
+ return s390_socket_nb_from_ids(cpu->env.drawer_id, cpu->env.book_id,
+ cpu->env.socket_id);
+}
+
+/**
+ * s390_has_topology:
+ *
+ * Return: true if the topology is supported by the machine.
+ */
+bool s390_has_topology(void)
+{
+ return s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY);
+}
+
+/**
+ * s390_topology_init:
+ * @ms: the machine state where the machine topology is defined
+ *
+ * Keep track of the machine topology.
+ *
+ * Allocate an array to keep the count of cores per socket.
+ * The index of the array starts at socket 0 from book 0 and
+ * drawer 0 up to the maximum allowed by the machine topology.
+ */
+static void s390_topology_init(MachineState *ms)
+{
+ CpuTopology *smp = &ms->smp;
+
+ s390_topology.cores_per_socket = g_new0(uint8_t, smp->sockets *
+ smp->books * smp->drawers);
+}
+
+/*
+ * s390_handle_ptf:
+ *
+ * @r1: number of the register holding the function code
+ *
+ * Function codes 0 (horizontal) and 1 (vertical) define the CPU
+ * polarization requested by the guest.
+ *
+ * Function code 2 handles topology changes and is interpreted
+ * by the SIE.
+ */
+void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra)
+{
+ CpuS390Polarization polarization;
+ CPUS390XState *env = &cpu->env;
+ uint64_t reg = env->regs[r1];
+ int fc = reg & S390_TOPO_FC_MASK;
+
+ if (!s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY)) {
+ s390_program_interrupt(env, PGM_OPERATION, ra);
+ return;
+ }
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ s390_program_interrupt(env, PGM_PRIVILEGED, ra);
+ return;
+ }
+
+ if (reg & ~S390_TOPO_FC_MASK) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+
+ polarization = S390_CPU_POLARIZATION_VERTICAL;
+ switch (fc) {
+ case 0:
+ polarization = S390_CPU_POLARIZATION_HORIZONTAL;
+ /* fallthrough */
+ case 1:
+ if (s390_topology.polarization == polarization) {
+ env->regs[r1] |= S390_PTF_REASON_DONE;
+ setcc(cpu, 2);
+ } else {
+ s390_topology.polarization = polarization;
+ s390_cpu_topology_set_changed(true);
+ qapi_event_send_cpu_polarization_change(polarization);
+ setcc(cpu, 0);
+ }
+ break;
+ default:
+ /* Note that fc == 2 is interpreted by the SIE */
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+}
+
+/**
+ * s390_topology_reset:
+ *
+ * Generic reset for CPU topology, calls s390_cpu_topology_set_changed()
+ * to reset the kernel Modified Topology Change Record.
+ */
+void s390_topology_reset(void)
+{
+ s390_cpu_topology_set_changed(false);
+ s390_topology.polarization = S390_CPU_POLARIZATION_HORIZONTAL;
+}
+
+/**
+ * s390_topology_cpu_default:
+ * @cpu: pointer to a S390CPU
+ * @errp: Error pointer
+ *
+ * Set up the default topology if no attributes are already set.
+ * Passing a CPU with some, but not all, attributes set is considered
+ * an error.
+ *
+ * The function calculates the (drawer_id, book_id, socket_id)
+ * topology by filling the cores starting from the first socket
+ * (0, 0, 0) up to the last (smp->drawers, smp->books, smp->sockets).
+ *
+ * CPU type and dedication have default values set in
+ * s390x_cpu_properties; entitlement must be adjusted depending on the
+ * dedication.
+ *
+ * Returns false if it is impossible to set up a default topology,
+ * true otherwise.
+ */
+static bool s390_topology_cpu_default(S390CPU *cpu, Error **errp)
+{
+ CpuTopology *smp = &current_machine->smp;
+ CPUS390XState *env = &cpu->env;
+
+ /* All geometry topology attributes must be set or all unset */
+ if ((env->socket_id < 0 || env->book_id < 0 || env->drawer_id < 0) &&
+ (env->socket_id >= 0 || env->book_id >= 0 || env->drawer_id >= 0)) {
+ error_setg(errp,
+ "Please define all or none of the topology geometry attributes");
+ return false;
+ }
+
+ /* If one value is unset all are unset -> calculate defaults */
+ if (env->socket_id < 0) {
+ env->socket_id = s390_std_socket(env->core_id, smp);
+ env->book_id = s390_std_book(env->core_id, smp);
+ env->drawer_id = s390_std_drawer(env->core_id, smp);
+ }
+
+ /*
+ * When the user specifies the entitlement as 'auto' on the command line,
+ * QEMU will set the entitlement as:
+ * Medium when the CPU is not dedicated.
+ * High when dedicated is true.
+ */
+ if (env->entitlement == S390_CPU_ENTITLEMENT_AUTO) {
+ if (env->dedicated) {
+ env->entitlement = S390_CPU_ENTITLEMENT_HIGH;
+ } else {
+ env->entitlement = S390_CPU_ENTITLEMENT_MEDIUM;
+ }
+ }
+ return true;
+}
+
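The s390_std_socket()/s390_std_book()/s390_std_drawer() helpers used
above come from include/hw/s390x/cpu-topology.h; in essence they fill
the topology linearly from core_id, along the lines of this sketch
(assumed equivalent, not copied from the patch):

    static inline int std_socket(int core_id, const CpuTopology *smp)
    {
        return (core_id / smp->cores) % smp->sockets;
    }

    static inline int std_book(int core_id, const CpuTopology *smp)
    {
        return (core_id / (smp->cores * smp->sockets)) % smp->books;
    }

    static inline int std_drawer(int core_id, const CpuTopology *smp)
    {
        return (core_id / (smp->cores * smp->sockets * smp->books))
               % smp->drawers;
    }

With cores=2 and sockets=2, core 5 lands on socket (5 / 2) % 2 = 0 of
book (5 / 4) % 2 = 1, i.e. sockets fill up one after the other.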
+/**
+ * s390_topology_check:
+ * @socket_id: socket to check
+ * @book_id: book to check
+ * @drawer_id: drawer to check
+ * @entitlement: entitlement to check
+ * @dedicated: dedication to check
+ * @errp: Error pointer
+ *
+ * The function checks whether the topology attributes fit inside
+ * the system topology.
+ *
+ * Returns false if the specified topology does not fit inside the
+ * machine topology, true otherwise.
+ */
+static bool s390_topology_check(uint16_t socket_id, uint16_t book_id,
+ uint16_t drawer_id, uint16_t entitlement,
+ bool dedicated, Error **errp)
+{
+ CpuTopology *smp = &current_machine->smp;
+
+ if (socket_id >= smp->sockets) {
+ error_setg(errp, "Unavailable socket: %d", socket_id);
+ return false;
+ }
+ if (book_id >= smp->books) {
+ error_setg(errp, "Unavailable book: %d", book_id);
+ return false;
+ }
+ if (drawer_id >= smp->drawers) {
+ error_setg(errp, "Unavailable drawer: %d", drawer_id);
+ return false;
+ }
+ if (entitlement >= S390_CPU_ENTITLEMENT__MAX) {
+ error_setg(errp, "Unknown entitlement: %d", entitlement);
+ return false;
+ }
+ if (dedicated && (entitlement == S390_CPU_ENTITLEMENT_LOW ||
+ entitlement == S390_CPU_ENTITLEMENT_MEDIUM)) {
+ error_setg(errp, "A dedicated CPU implies high entitlement");
+ return false;
+ }
+ return true;
+}
+
+/**
+ * s390_topology_need_report:
+ * @cpu: Current cpu
+ * @drawer_id: future drawer ID
+ * @book_id: future book ID
+ * @socket_id: future socket ID
+ * @entitlement: future entitlement
+ * @dedicated: future dedication
+ *
+ * A modified topology change report is needed if the topology
+ * tree or the topology attributes change.
+ */
+static bool s390_topology_need_report(S390CPU *cpu, int drawer_id,
+ int book_id, int socket_id,
+ uint16_t entitlement, bool dedicated)
+{
+ return cpu->env.drawer_id != drawer_id ||
+ cpu->env.book_id != book_id ||
+ cpu->env.socket_id != socket_id ||
+ cpu->env.entitlement != entitlement ||
+ cpu->env.dedicated != dedicated;
+}
+
+/**
+ * s390_update_cpu_props:
+ * @ms: the machine state
+ * @cpu: the CPU whose properties are updated from its environment
+ */
+static void s390_update_cpu_props(MachineState *ms, S390CPU *cpu)
+{
+ CpuInstanceProperties *props;
+
+ props = &ms->possible_cpus->cpus[cpu->env.core_id].props;
+
+ props->socket_id = cpu->env.socket_id;
+ props->book_id = cpu->env.book_id;
+ props->drawer_id = cpu->env.drawer_id;
+}
+
+/**
+ * s390_topology_setup_cpu:
+ * @ms: MachineState used to initialize the topology structure on
+ * first call.
+ * @cpu: the new S390CPU to insert in the topology structure
+ * @errp: the error pointer
+ *
+ * Called from CPU hotplug to check and setup the CPU attributes
+ * before the CPU is inserted in the topology.
+ * There is no need to update the MTCR explicitly here because it
+ * will be updated by KVM on creation of the new CPU.
+ */
+void s390_topology_setup_cpu(MachineState *ms, S390CPU *cpu, Error **errp)
+{
+ int entry;
+
+    /*
+     * We must not initialize the topology if the CPU model does not
+     * support it. We therefore wait until the first CPU is realized,
+     * which also realizes the CPU model, before initializing the
+     * topology structures.
+     *
+     * s390_topology_setup_cpu() is called from CPU hotplug.
+     */
+ if (!s390_topology.cores_per_socket) {
+ s390_topology_init(ms);
+ }
+
+ if (!s390_topology_cpu_default(cpu, errp)) {
+ return;
+ }
+
+ if (!s390_topology_check(cpu->env.socket_id, cpu->env.book_id,
+ cpu->env.drawer_id, cpu->env.entitlement,
+ cpu->env.dedicated, errp)) {
+ return;
+ }
+
+    /* Is there still room on this socket? */
+ entry = s390_socket_nb(cpu);
+ if (s390_topology.cores_per_socket[entry] >= ms->smp.cores) {
+ error_setg(errp, "No more space on this socket");
+ return;
+ }
+
+ /* Update the count of cores in sockets */
+ s390_topology.cores_per_socket[entry] += 1;
+
+ /* topology tree is reflected in props */
+ s390_update_cpu_props(ms, cpu);
+}
+
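The cores_per_socket[] bookkeeping above indexes a flat array of
drawers * books * sockets counters. s390_socket_nb(), defined earlier
in this file, flattens the IDs roughly as follows (a sketch, assuming
it matches s390_socket_nb_from_ids()):

    static int socket_nb_from_ids(int drawer_id, int book_id,
                                  int socket_id)
    {
        const CpuTopology *smp = &current_machine->smp;

        return (drawer_id * smp->books + book_id) * smp->sockets
               + socket_id;
    }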
+static void s390_change_topology(uint16_t core_id,
+ bool has_socket_id, uint16_t socket_id,
+ bool has_book_id, uint16_t book_id,
+ bool has_drawer_id, uint16_t drawer_id,
+ bool has_entitlement,
+ CpuS390Entitlement entitlement,
+ bool has_dedicated, bool dedicated,
+ Error **errp)
+{
+ MachineState *ms = current_machine;
+ int old_socket_entry;
+ int new_socket_entry;
+ bool report_needed;
+ S390CPU *cpu;
+
+ cpu = s390_cpu_addr2state(core_id);
+ if (!cpu) {
+ error_setg(errp, "Core-id %d does not exist!", core_id);
+ return;
+ }
+
+    /* Take attributes not provided from the CPU, then verify the topology */
+ if (!has_socket_id) {
+ socket_id = cpu->env.socket_id;
+ }
+ if (!has_book_id) {
+ book_id = cpu->env.book_id;
+ }
+ if (!has_drawer_id) {
+ drawer_id = cpu->env.drawer_id;
+ }
+ if (!has_dedicated) {
+ dedicated = cpu->env.dedicated;
+ }
+
+    /*
+     * When the entitlement is not specified or is 'auto', QEMU sets
+     * it to:
+     *   Medium when the CPU is not dedicated.
+     *   High when the CPU is dedicated.
+     */
+ if (!has_entitlement || entitlement == S390_CPU_ENTITLEMENT_AUTO) {
+ if (dedicated) {
+ entitlement = S390_CPU_ENTITLEMENT_HIGH;
+ } else {
+ entitlement = S390_CPU_ENTITLEMENT_MEDIUM;
+ }
+ }
+
+ if (!s390_topology_check(socket_id, book_id, drawer_id,
+ entitlement, dedicated, errp)) {
+ return;
+ }
+
+ /* Check for space on new socket */
+ old_socket_entry = s390_socket_nb(cpu);
+ new_socket_entry = s390_socket_nb_from_ids(drawer_id, book_id, socket_id);
+
+ if (new_socket_entry != old_socket_entry) {
+ if (s390_topology.cores_per_socket[new_socket_entry] >=
+ ms->smp.cores) {
+ error_setg(errp, "No more space on this socket");
+ return;
+ }
+ /* Update the count of cores in sockets */
+ s390_topology.cores_per_socket[new_socket_entry] += 1;
+ s390_topology.cores_per_socket[old_socket_entry] -= 1;
+ }
+
+ /* Check if we will need to report the modified topology */
+ report_needed = s390_topology_need_report(cpu, drawer_id, book_id,
+ socket_id, entitlement,
+ dedicated);
+
+    /* All checks done, commit the new topology to the vCPU state */
+ cpu->env.drawer_id = drawer_id;
+ cpu->env.book_id = book_id;
+ cpu->env.socket_id = socket_id;
+ cpu->env.dedicated = dedicated;
+ cpu->env.entitlement = entitlement;
+
+ /* topology tree is reflected in props */
+ s390_update_cpu_props(ms, cpu);
+
+ /* Advertise the topology change */
+ if (report_needed) {
+ s390_cpu_topology_set_changed(true);
+ }
+}
+
+void qmp_set_cpu_topology(uint16_t core,
+ bool has_socket, uint16_t socket,
+ bool has_book, uint16_t book,
+ bool has_drawer, uint16_t drawer,
+ bool has_entitlement, CpuS390Entitlement entitlement,
+ bool has_dedicated, bool dedicated,
+ Error **errp)
+{
+ if (!s390_has_topology()) {
+ error_setg(errp, "This machine doesn't support topology");
+ return;
+ }
+
+ s390_change_topology(core, has_socket, socket, has_book, book,
+ has_drawer, drawer, has_entitlement, entitlement,
+ has_dedicated, dedicated, errp);
+}
+
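For reference, a matching QMP exchange might look as follows; the
command is declared unstable in the QAPI schema and the argument names
below are taken from it, so treat this as a sketch rather than a
stable interface:

    -> { "execute": "set-cpu-topology",
         "arguments": { "core-id": 0, "socket-id": 1, "book-id": 0,
                        "drawer-id": 0, "entitlement": "high",
                        "dedicated": true } }
    <- { "return": {} }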
+CpuPolarizationInfo *qmp_query_s390x_cpu_polarization(Error **errp)
+{
+ CpuPolarizationInfo *info = g_new0(CpuPolarizationInfo, 1);
+
+ info->polarization = s390_topology.polarization;
+ return info;
+}
diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c
index 4017081d49..15d26efc95 100644
--- a/hw/s390x/css-bridge.c
+++ b/hw/s390x/css-bridge.c
@@ -95,7 +95,6 @@ static const TypeInfo virtual_css_bus_info = {
VirtualCssBus *virtual_css_bus_init(void)
{
- VirtualCssBus *cbus;
BusState *bus;
DeviceState *dev;
@@ -103,19 +102,19 @@ VirtualCssBus *virtual_css_bus_init(void)
dev = qdev_new(TYPE_VIRTUAL_CSS_BRIDGE);
object_property_add_child(qdev_get_machine(), TYPE_VIRTUAL_CSS_BRIDGE,
OBJECT(dev));
- sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
/* Create bus on bridge device */
bus = qbus_new(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css");
- cbus = VIRTUAL_CSS_BUS(bus);
/* Enable hotplugging */
qbus_set_hotplug_handler(bus, OBJECT(dev));
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
css_register_io_adapters(CSS_IO_ADAPTER_VIRTIO, true, false,
0, &error_abort);
- return cbus;
+ return VIRTUAL_CSS_BUS(bus);
}
/***************** Virtual-css Bus Bridge Device ********************/
diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build
index 6fd096813a..482fd13420 100644
--- a/hw/s390x/meson.build
+++ b/hw/s390x/meson.build
@@ -23,6 +23,7 @@ s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
's390-skeys-kvm.c',
's390-stattrib-kvm.c',
's390-pci-kvm.c',
+ 'cpu-topology.c',
))
s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
'tod-tcg.c',
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 2d75f2131f..7262725d2e 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -45,6 +45,7 @@
#include "target/s390x/kvm/pv.h"
#include "migration/blocker.h"
#include "qapi/visitor.h"
+#include "hw/s390x/cpu-topology.h"
static Error *pv_mig_blocker;
@@ -123,6 +124,9 @@ static void subsystem_reset(void)
device_cold_reset(dev);
}
}
+ if (s390_has_topology()) {
+ s390_topology_reset();
+ }
}
static int virtio_ccw_hcall_notify(const uint64_t *args)
@@ -309,10 +313,18 @@ static void s390_cpu_plug(HotplugHandler *hotplug_dev,
{
MachineState *ms = MACHINE(hotplug_dev);
S390CPU *cpu = S390_CPU(dev);
+ ERRP_GUARD();
g_assert(!ms->possible_cpus->cpus[cpu->env.core_id].cpu);
ms->possible_cpus->cpus[cpu->env.core_id].cpu = OBJECT(dev);
+ if (s390_has_topology()) {
+ s390_topology_setup_cpu(ms, cpu, errp);
+ if (*errp) {
+ return;
+ }
+ }
+
if (dev->hotplugged) {
raise_irq_cpu_hotplug();
}
@@ -332,8 +344,7 @@ static void s390_machine_unprotect(S390CcwMachineState *ms)
s390_pv_vm_disable();
}
ms->pv = false;
- migrate_del_blocker(pv_mig_blocker);
- error_free_or_abort(&pv_mig_blocker);
+ migrate_del_blocker(&pv_mig_blocker);
ram_block_discard_disable(false);
}
@@ -356,11 +367,10 @@ static int s390_machine_protect(S390CcwMachineState *ms)
error_setg(&pv_mig_blocker,
"protected VMs are currently not migratable.");
- rc = migrate_add_blocker(pv_mig_blocker, &local_err);
+ rc = migrate_add_blocker(&pv_mig_blocker, &local_err);
if (rc) {
ram_block_discard_disable(false);
error_report_err(local_err);
- error_free_or_abort(&pv_mig_blocker);
return rc;
}
@@ -368,8 +378,7 @@ static int s390_machine_protect(S390CcwMachineState *ms)
rc = s390_pv_vm_enable();
if (rc) {
ram_block_discard_disable(false);
- migrate_del_blocker(pv_mig_blocker);
- error_free_or_abort(&pv_mig_blocker);
+ migrate_del_blocker(&pv_mig_blocker);
return rc;
}
@@ -562,11 +571,20 @@ static const CPUArchIdList *s390_possible_cpu_arch_ids(MachineState *ms)
sizeof(CPUArchId) * max_cpus);
ms->possible_cpus->len = max_cpus;
for (i = 0; i < ms->possible_cpus->len; i++) {
+ CpuInstanceProperties *props = &ms->possible_cpus->cpus[i].props;
+
ms->possible_cpus->cpus[i].type = ms->cpu_type;
ms->possible_cpus->cpus[i].vcpus_count = 1;
ms->possible_cpus->cpus[i].arch_id = i;
- ms->possible_cpus->cpus[i].props.has_core_id = true;
- ms->possible_cpus->cpus[i].props.core_id = i;
+
+ props->has_core_id = true;
+ props->core_id = i;
+ props->has_socket_id = true;
+ props->socket_id = s390_std_socket(i, &ms->smp);
+ props->has_book_id = true;
+ props->book_id = s390_std_book(i, &ms->smp);
+ props->has_drawer_id = true;
+ props->drawer_id = s390_std_drawer(i, &ms->smp);
}
return ms->possible_cpus;
@@ -744,6 +762,8 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
mc->no_sdcard = 1;
mc->max_cpus = S390_MAX_CPUS;
mc->has_hotpluggable_cpus = true;
+ mc->smp_props.books_supported = true;
+ mc->smp_props.drawers_supported = true;
assert(!mc->get_hotplug_handler);
mc->get_hotplug_handler = s390_get_hotplug_handler;
mc->cpu_index_to_instance_props = s390_cpu_index_to_props;
@@ -853,6 +873,8 @@ static void ccw_machine_8_1_class_options(MachineClass *mc)
{
ccw_machine_8_2_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_8_1, hw_compat_8_1_len);
+ mc->smp_props.drawers_supported = false;
+ mc->smp_props.books_supported = false;
}
DEFINE_CCW_MACHINE(8_1, "8.1", false);
diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c
index eff74479f4..d339cbb7e4 100644
--- a/hw/s390x/sclp.c
+++ b/hw/s390x/sclp.c
@@ -20,6 +20,7 @@
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
+#include "hw/s390x/cpu-topology.h"
static inline SCLPDevice *get_sclp_device(void)
{
@@ -123,6 +124,10 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
return;
}
+ if (s390_has_topology()) {
+ read_info->stsi_parm = SCLP_READ_SCP_INFO_MNEST;
+ }
+
/* CPU information */
prepare_cpu_entries(machine, entries_start, &cpu_count);
read_info->entries_cpu = cpu_to_be16(cpu_count);
diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c
index ce07b16884..a641089929 100644
--- a/hw/s390x/sclpquiesce.c
+++ b/hw/s390x/sclpquiesce.c
@@ -78,12 +78,10 @@ static const VMStateDescription vmstate_sclpquiesce = {
}
};
-typedef struct QuiesceNotifier QuiesceNotifier;
-
-static struct QuiesceNotifier {
+typedef struct QuiesceNotifier {
Notifier notifier;
SCLPEvent *event;
-} qn;
+} QuiesceNotifier;
static void quiesce_powerdown_req(Notifier *n, void *opaque)
{
@@ -97,6 +95,8 @@ static void quiesce_powerdown_req(Notifier *n, void *opaque)
static int quiesce_init(SCLPEvent *event)
{
+ static QuiesceNotifier qn;
+
qn.notifier.notify = quiesce_powerdown_req;
qn.event = event;
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 17c548b84f..80453718a3 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -768,10 +768,6 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
sch->cssid, sch->ssid, sch->schid, sch->devno,
ccw_dev->devno.valid ? "user-configured" : "auto-configured");
- if (kvm_enabled() && !kvm_eventfds_enabled()) {
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-
/* fd-based ioevents can't be synchronized in record/replay */
if (replay_mode != REPLAY_MODE_NONE) {
dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index a06f01af26..4c8637045d 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -16,6 +16,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/vhost.h"
@@ -25,7 +26,7 @@
#include "hw/virtio/virtio-access.h"
#include "hw/fw-path-provider.h"
-int vhost_scsi_common_start(VHostSCSICommon *vsc)
+int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp)
{
int ret, i;
VirtIODevice *vdev = VIRTIO_DEVICE(vsc);
@@ -35,42 +36,51 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)vsc;
if (!k->set_guest_notifiers) {
- error_report("binding does not support guest notifiers");
+ error_setg(errp, "binding does not support guest notifiers");
return -ENOSYS;
}
ret = vhost_dev_enable_notifiers(&vsc->dev, vdev);
if (ret < 0) {
+ error_setg_errno(errp, -ret, "Error enabling host notifiers");
return ret;
}
ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, true);
if (ret < 0) {
- error_report("Error binding guest notifier");
+ error_setg_errno(errp, -ret, "Error binding guest notifier");
goto err_host_notifiers;
}
vsc->dev.acked_features = vdev->guest_features;
- assert(vsc->inflight == NULL);
- vsc->inflight = g_new0(struct vhost_inflight, 1);
- ret = vhost_dev_get_inflight(&vsc->dev,
- vs->conf.virtqueue_size,
- vsc->inflight);
+ ret = vhost_dev_prepare_inflight(&vsc->dev, vdev);
if (ret < 0) {
- error_report("Error get inflight: %d", -ret);
+ error_setg_errno(errp, -ret, "Error setting inflight format");
goto err_guest_notifiers;
}
- ret = vhost_dev_set_inflight(&vsc->dev, vsc->inflight);
- if (ret < 0) {
- error_report("Error set inflight: %d", -ret);
- goto err_guest_notifiers;
+ if (vsc->inflight) {
+ if (!vsc->inflight->addr) {
+ ret = vhost_dev_get_inflight(&vsc->dev,
+ vs->conf.virtqueue_size,
+ vsc->inflight);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Error getting inflight");
+ goto err_guest_notifiers;
+ }
+ }
+
+ ret = vhost_dev_set_inflight(&vsc->dev, vsc->inflight);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Error setting inflight");
+ goto err_guest_notifiers;
+ }
}
ret = vhost_dev_start(&vsc->dev, vdev, true);
if (ret < 0) {
- error_report("Error start vhost dev");
+ error_setg_errno(errp, -ret, "Error starting vhost dev");
goto err_guest_notifiers;
}
@@ -85,9 +95,6 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
return ret;
err_guest_notifiers:
- g_free(vsc->inflight);
- vsc->inflight = NULL;
-
k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
err_host_notifiers:
vhost_dev_disable_notifiers(&vsc->dev, vdev);
@@ -111,12 +118,6 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc)
}
assert(ret >= 0);
- if (vsc->inflight) {
- vhost_dev_free_inflight(vsc->inflight);
- g_free(vsc->inflight);
- vsc->inflight = NULL;
- }
-
vhost_dev_disable_notifiers(&vsc->dev, vdev);
}
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 443f67daa4..5d9e06a9bb 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -75,6 +75,7 @@ static int vhost_scsi_start(VHostSCSI *s)
int ret, abi_version;
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
const VhostOps *vhost_ops = vsc->dev.vhost_ops;
+ Error *local_err = NULL;
ret = vhost_ops->vhost_scsi_get_abi_version(&vsc->dev, &abi_version);
if (ret < 0) {
@@ -88,14 +89,15 @@ static int vhost_scsi_start(VHostSCSI *s)
return -ENOSYS;
}
- ret = vhost_scsi_common_start(vsc);
+ ret = vhost_scsi_common_start(vsc, &local_err);
if (ret < 0) {
+ error_reportf_err(local_err, "Error starting vhost-scsi");
return ret;
}
ret = vhost_scsi_set_endpoint(s);
if (ret < 0) {
- error_report("Error setting vhost-scsi endpoint");
+ error_reportf_err(local_err, "Error setting vhost-scsi endpoint");
vhost_scsi_common_stop(vsc);
}
@@ -208,7 +210,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
"When external environment supports it (Orchestrator migrates "
"target SCSI device state or use shared storage over network), "
"set 'migratable' property to true to enable migration.");
- if (migrate_add_blocker(vsc->migration_blocker, errp) < 0) {
+ if (migrate_add_blocker(&vsc->migration_blocker, errp) < 0) {
goto free_virtio;
}
}
@@ -241,10 +243,9 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
free_vqs:
g_free(vqs);
if (!vsc->migratable) {
- migrate_del_blocker(vsc->migration_blocker);
+ migrate_del_blocker(&vsc->migration_blocker);
}
free_virtio:
- error_free(vsc->migration_blocker);
virtio_scsi_common_unrealize(dev);
close_fd:
if (vhostfd >= 0) {
@@ -260,8 +261,7 @@ static void vhost_scsi_unrealize(DeviceState *dev)
struct vhost_virtqueue *vqs = vsc->dev.vqs;
if (!vsc->migratable) {
- migrate_del_blocker(vsc->migration_blocker);
- error_free(vsc->migration_blocker);
+ migrate_del_blocker(&vsc->migration_blocker);
}
/* This will stop vhost backend. */
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index df6b66cc1a..4486500cac 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -39,69 +39,231 @@ static const int user_feature_bits[] = {
VHOST_INVALID_FEATURE_BIT
};
+static int vhost_user_scsi_start(VHostUserSCSI *s, Error **errp)
+{
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ int ret;
+
+ ret = vhost_scsi_common_start(vsc, errp);
+ s->started_vu = !(ret < 0);
+
+ return ret;
+}
+
+static void vhost_user_scsi_stop(VHostUserSCSI *s)
+{
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+
+ if (!s->started_vu) {
+ return;
+ }
+ s->started_vu = false;
+
+ vhost_scsi_common_stop(vsc);
+}
+
static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserSCSI *s = (VHostUserSCSI *)vdev;
+ DeviceState *dev = DEVICE(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- bool start = (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ bool should_start = virtio_device_should_start(vdev, status);
+ Error *local_err = NULL;
+ int ret;
- if (vhost_dev_is_started(&vsc->dev) == start) {
+ if (!s->connected) {
return;
}
- if (start) {
- int ret;
+ if (vhost_dev_is_started(&vsc->dev) == should_start) {
+ return;
+ }
- ret = vhost_scsi_common_start(vsc);
+ if (should_start) {
+ ret = vhost_user_scsi_start(s, &local_err);
if (ret < 0) {
- error_report("unable to start vhost-user-scsi: %s", strerror(-ret));
- exit(1);
+ error_reportf_err(local_err, "unable to start vhost-user-scsi: %s",
+ strerror(-ret));
+ qemu_chr_fe_disconnect(&vs->conf.chardev);
}
} else {
- vhost_scsi_common_stop(vsc);
+ vhost_user_scsi_stop(s);
}
}
-static void vhost_user_scsi_reset(VirtIODevice *vdev)
+static void vhost_user_scsi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
- VHostSCSICommon *vsc = VHOST_SCSI_COMMON(vdev);
- struct vhost_dev *dev = &vsc->dev;
+ VHostUserSCSI *s = (VHostUserSCSI *)vdev;
+ DeviceState *dev = DEVICE(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+
+ Error *local_err = NULL;
+ int i, ret;
+
+ if (!vdev->start_on_kick) {
+ return;
+ }
+
+ if (!s->connected) {
+ return;
+ }
+
+ if (vhost_dev_is_started(&vsc->dev)) {
+ return;
+ }
/*
- * Historically, reset was not implemented so only reset devices
- * that are expecting it.
+ * Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
+ * vhost here instead of waiting for .set_status().
*/
- if (!virtio_has_feature(dev->protocol_features,
- VHOST_USER_PROTOCOL_F_RESET_DEVICE)) {
+ ret = vhost_user_scsi_start(s, &local_err);
+ if (ret < 0) {
+ error_reportf_err(local_err, "vhost-user-scsi: vhost start failed: ");
+ qemu_chr_fe_disconnect(&vs->conf.chardev);
return;
}
- if (dev->vhost_ops->vhost_reset_device) {
- dev->vhost_ops->vhost_reset_device(dev);
+ /* Kick right away to begin processing requests already in vring */
+ for (i = 0; i < vsc->dev.nvqs; i++) {
+ VirtQueue *kick_vq = virtio_get_queue(vdev, i);
+
+ if (!virtio_queue_get_desc_addr(vdev, i)) {
+ continue;
+ }
+ event_notifier_set(virtio_queue_get_host_notifier(kick_vq));
}
}
-static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+static int vhost_user_scsi_connect(DeviceState *dev, Error **errp)
{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ int ret = 0;
+
+ if (s->connected) {
+ return 0;
+ }
+ s->connected = true;
+
+ vsc->dev.num_queues = vs->conf.num_queues;
+ vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
+ vsc->dev.vqs = s->vhost_vqs;
+ vsc->dev.vq_index = 0;
+ vsc->dev.backend_features = 0;
+
+ ret = vhost_dev_init(&vsc->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
+ errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ ret = vhost_user_scsi_start(s, errp);
+ }
+
+ return ret;
+}
+
+static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event);
+
+static void vhost_user_scsi_disconnect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+
+ if (!s->connected) {
+ return;
+ }
+ s->connected = false;
+
+ vhost_user_scsi_stop(s);
+
+ vhost_dev_cleanup(&vsc->dev);
+
+ /* Re-instate the event handler for new connections */
+ qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
+ vhost_user_scsi_event, NULL, dev, NULL, true);
+}
+
+static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
+{
+ DeviceState *dev = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
+ VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ Error *local_err = NULL;
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ if (vhost_user_scsi_connect(dev, &local_err) < 0) {
+ error_report_err(local_err);
+ qemu_chr_fe_disconnect(&vs->conf.chardev);
+ return;
+ }
+ break;
+ case CHR_EVENT_CLOSED:
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
+ vhost_user_scsi_disconnect,
+ vhost_user_scsi_event);
+ break;
+ case CHR_EVENT_BREAK:
+ case CHR_EVENT_MUX_IN:
+ case CHR_EVENT_MUX_OUT:
+ /* Ignore */
+ break;
+ }
+}
+
+static int vhost_user_scsi_realize_connect(VHostUserSCSI *s, Error **errp)
+{
+ DeviceState *dev = DEVICE(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
+ int ret;
+
+ s->connected = false;
+
+ ret = qemu_chr_fe_wait_connected(&vs->conf.chardev, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vhost_user_scsi_connect(dev, errp);
+ if (ret < 0) {
+ qemu_chr_fe_disconnect(&vs->conf.chardev);
+ return ret;
+ }
+ assert(s->connected);
+
+ return 0;
}
static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
{
+ ERRP_GUARD();
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- struct vhost_virtqueue *vqs = NULL;
Error *err = NULL;
int ret;
+ int retries = VU_REALIZE_CONN_RETRIES;
if (!vs->conf.chardev.chr) {
error_setg(errp, "vhost-user-scsi: missing chardev");
return;
}
- virtio_scsi_common_realize(dev, vhost_dummy_handle_output,
- vhost_dummy_handle_output,
- vhost_dummy_handle_output, &err);
+ virtio_scsi_common_realize(dev, vhost_user_scsi_handle_output,
+ vhost_user_scsi_handle_output,
+ vhost_user_scsi_handle_output, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
@@ -111,18 +273,28 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
goto free_virtio;
}
- vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
- vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
- vsc->dev.vq_index = 0;
- vsc->dev.backend_features = 0;
- vqs = vsc->dev.vqs;
+ vsc->inflight = g_new0(struct vhost_inflight, 1);
+ s->vhost_vqs = g_new0(struct vhost_virtqueue,
+ VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues);
+
+ assert(!*errp);
+ do {
+ if (*errp) {
+ error_prepend(errp, "Reconnecting after error: ");
+ error_report_err(*errp);
+ *errp = NULL;
+ }
+ ret = vhost_user_scsi_realize_connect(s, errp);
+ } while (ret < 0 && retries--);
- ret = vhost_dev_init(&vsc->dev, &s->vhost_user,
- VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
goto free_vhost;
}
+    /* We're fully initialized; we can now operate, so add the handler. */
+ qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
+ vhost_user_scsi_event, NULL, (void *)dev,
+ NULL, true);
/* Channel and lun both are 0 for bootable vhost-user-scsi disk */
vsc->channel = 0;
vsc->lun = 0;
@@ -131,8 +303,12 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
return;
free_vhost:
+ g_free(s->vhost_vqs);
+ s->vhost_vqs = NULL;
+ g_free(vsc->inflight);
+ vsc->inflight = NULL;
vhost_user_cleanup(&s->vhost_user);
- g_free(vqs);
+
free_virtio:
virtio_scsi_common_unrealize(dev);
}
@@ -142,16 +318,23 @@ static void vhost_user_scsi_unrealize(DeviceState *dev)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- struct vhost_virtqueue *vqs = vsc->dev.vqs;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
/* This will stop the vhost backend. */
vhost_user_scsi_set_status(vdev, 0);
+ qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL, NULL, NULL, NULL,
+ NULL, false);
vhost_dev_cleanup(&vsc->dev);
- g_free(vqs);
+ g_free(s->vhost_vqs);
+ s->vhost_vqs = NULL;
+
+ vhost_dev_free_inflight(vsc->inflight);
+ g_free(vsc->inflight);
+ vsc->inflight = NULL;
- virtio_scsi_common_unrealize(dev);
vhost_user_cleanup(&s->vhost_user);
+ virtio_scsi_common_unrealize(dev);
}
static Property vhost_user_scsi_properties[] = {
@@ -200,7 +383,6 @@ static void vhost_user_scsi_class_init(ObjectClass *klass, void *data)
vdc->get_features = vhost_scsi_common_get_features;
vdc->set_config = vhost_scsi_common_set_config;
vdc->set_status = vhost_user_scsi_set_status;
- vdc->reset = vhost_user_scsi_reset;
fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path;
}
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 45b95ea070..fa53f0902c 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -761,7 +761,7 @@ static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
- VirtIOSCSICommon *vs = &s->parent_obj;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
SCSIDevice *d;
int rc;
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
index 5564765a9b..40473b0db0 100644
--- a/hw/sd/sdhci.c
+++ b/hw/sd/sdhci.c
@@ -321,6 +321,8 @@ static void sdhci_poweron_reset(DeviceState *dev)
static void sdhci_data_transfer(void *opaque);
+#define BLOCK_SIZE_MASK (4 * KiB - 1)
+
static void sdhci_send_command(SDHCIState *s)
{
SDRequest request;
@@ -371,7 +373,8 @@ static void sdhci_send_command(SDHCIState *s)
sdhci_update_irq(s);
- if (!timeout && s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
+ if (!timeout && (s->blksize & BLOCK_SIZE_MASK) &&
+ (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
s->data_count = 0;
sdhci_data_transfer(s);
}
@@ -406,7 +409,6 @@ static void sdhci_end_transfer(SDHCIState *s)
/*
* Programmed i/o data transfer
*/
-#define BLOCK_SIZE_MASK (4 * KiB - 1)
/* Fill host controller's read buffer with BLKSIZE bytes of data from card */
static void sdhci_read_block_from_card(SDHCIState *s)
@@ -1154,7 +1156,8 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
s->sdmasysad = (s->sdmasysad & mask) | value;
MASKED_WRITE(s->sdmasysad, mask, value);
/* Writing to last byte of sdmasysad might trigger transfer */
- if (!(mask & 0xFF000000) && s->blkcnt && s->blksize &&
+ if (!(mask & 0xFF000000) && s->blkcnt &&
+ (s->blksize & BLOCK_SIZE_MASK) &&
SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
if (s->trnmod & SDHC_TRNS_MULTI) {
sdhci_sdma_transfer_multi_blocks(s);
@@ -1168,7 +1171,11 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
if (!TRANSFERRING_DATA(s->prnsts)) {
uint16_t blksize = s->blksize;
- MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
+ /*
+ * [14:12] SDMA Buffer Boundary
+ * [11:00] Transfer Block Size
+ */
+ MASKED_WRITE(s->blksize, mask, extract32(value, 0, 15));
MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
/* Limit block size to the maximum buffer size */
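The point of widening the write from 12 to 15 bits is that the Block
Size register carries two fields, and the transfer logic above now
isolates the block size with BLOCK_SIZE_MASK instead of testing the
whole register. A standalone sketch of the decode (field layout per
the SD Host Controller specification):

    #include <assert.h>
    #include <stdint.h>

    /* Bits [11:0] hold the transfer block size, bits [14:12] the SDMA
     * buffer boundary (4 KiB << boundary). */
    int main(void)
    {
        uint16_t blksize = 0x7200;                   /* example value */
        unsigned xfer_size = blksize & 0x0fff;       /* 0x200 = 512 B */
        unsigned boundary = (blksize >> 12) & 0x7;   /* 7 -> 512 KiB  */

        assert(xfer_size == 0x200);
        assert(boundary == 7);
        return 0;
    }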
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index d908a38f73..c871170378 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -360,11 +360,11 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp)
pci_dev->config[0x09] = 0x00; // programming i/f
pci_dev->config[0x0D] = 0x0a; // latency_timer
- memory_region_init_alias(&s->bar0, OBJECT(s), "bar0", get_system_io(),
- 0, 0x1000000);
+ memory_region_init_alias(&s->bar0, OBJECT(s), "bar0",
+ pci_address_space_io(pci_dev), 0, 0x1000000);
pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0);
- memory_region_init_alias(&s->bar1, OBJECT(s), "bar1", get_system_io(),
- 0, 0x8000);
+ memory_region_init_alias(&s->bar1, OBJECT(s), "bar1",
+ pci_address_space_io(pci_dev), 0, 0x8000);
pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->bar1);
}
diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c
index e4093e2904..b25da448c8 100644
--- a/hw/timer/i8254_common.c
+++ b/hw/timer/i8254_common.c
@@ -52,10 +52,8 @@ int pit_get_out(PITChannelState *s, int64_t current_time)
switch (s->mode) {
default:
case 0:
- out = (d >= s->count);
- break;
case 1:
- out = (d < s->count);
+ out = (d >= s->count);
break;
case 2:
if ((d % s->count) == 0 && d != 0) {
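The merged case reflects the 8254 one-shot semantics: in modes 0 and 1
alike, OUT is low while the count is running and goes high once it has
elapsed, so both reduce to d >= s->count (mode 1 previously used the
inverted test). A compilable restatement of the assumed behaviour:

    #include <assert.h>

    /* Assumed 8254 mode 0/1 OUT level: low while counting, high once
     * the programmed count has elapsed. */
    static int pit_out_mode01(long d, long count)
    {
        return d >= count;
    }

    int main(void)
    {
        assert(pit_out_mode01(5, 10) == 0);   /* still counting */
        assert(pit_out_mode01(10, 10) == 1);  /* count elapsed  */
        return 0;
    }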
diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c
index 32f5e021f8..a8bd93aeb2 100644
--- a/hw/timer/npcm7xx_timer.c
+++ b/hw/timer/npcm7xx_timer.c
@@ -138,6 +138,9 @@ static int64_t npcm7xx_timer_count_to_ns(NPCM7xxTimer *t, uint32_t count)
/* Convert a time interval in nanoseconds to a timer cycle count. */
static uint32_t npcm7xx_timer_ns_to_count(NPCM7xxTimer *t, int64_t ns)
{
+ if (ns < 0) {
+ return 0;
+ }
return clock_ns_to_ticks(t->ctrl->clock, ns) /
npcm7xx_tcsr_prescaler(t->tcsr);
}
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 5ff5acf1d8..d806057b40 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -129,11 +129,7 @@ int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp)
error_setg(&multiple_devices_migration_blocker,
"Multiple VFIO devices migration is supported only if all of "
"them support P2P migration");
- ret = migrate_add_blocker(multiple_devices_migration_blocker, errp);
- if (ret < 0) {
- error_free(multiple_devices_migration_blocker);
- multiple_devices_migration_blocker = NULL;
- }
+ ret = migrate_add_blocker(&multiple_devices_migration_blocker, errp);
return ret;
}
@@ -145,9 +141,7 @@ void vfio_unblock_multiple_devices_migration(void)
return;
}
- migrate_del_blocker(multiple_devices_migration_blocker);
- error_free(multiple_devices_migration_blocker);
- multiple_devices_migration_blocker = NULL;
+ migrate_del_blocker(&multiple_devices_migration_blocker);
}
bool vfio_viommu_preset(VFIODevice *vbasedev)
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index da43dcd2fe..28d422b39f 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -872,8 +872,8 @@ static int vfio_migration_init(VFIODevice *vbasedev)
NULL;
migration->vm_state = qdev_add_vm_change_state_handler_full(
vbasedev->dev, vfio_vmstate_change, prepare_cb, vbasedev);
- migration->migration_state.notify = vfio_migration_state_notifier;
- add_migration_state_change_notifier(&migration->migration_state);
+ migration_add_notifier(&migration->migration_state,
+ vfio_migration_state_notifier);
return 0;
}
@@ -882,7 +882,7 @@ static void vfio_migration_deinit(VFIODevice *vbasedev)
{
VFIOMigration *migration = vbasedev->migration;
- remove_migration_state_change_notifier(&migration->migration_state);
+ migration_remove_notifier(&migration->migration_state);
qemu_del_vm_change_state_handler(migration->vm_state);
unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
vfio_migration_free(vbasedev);
@@ -891,8 +891,6 @@ static void vfio_migration_deinit(VFIODevice *vbasedev)
static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
{
- int ret;
-
if (vbasedev->enable_migration == ON_OFF_AUTO_ON) {
error_propagate(errp, err);
return -EINVAL;
@@ -901,13 +899,7 @@ static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp)
vbasedev->migration_blocker = error_copy(err);
error_free(err);
- ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
- if (ret < 0) {
- error_free(vbasedev->migration_blocker);
- vbasedev->migration_blocker = NULL;
- }
-
- return ret;
+ return migrate_add_blocker(&vbasedev->migration_blocker, errp);
}
/* ---------------------------------------------------------------------- */
@@ -994,9 +986,5 @@ void vfio_migration_exit(VFIODevice *vbasedev)
vfio_migration_deinit(vbasedev);
}
- if (vbasedev->migration_blocker) {
- migrate_del_blocker(vbasedev->migration_blocker);
- error_free(vbasedev->migration_blocker);
- vbasedev->migration_blocker = NULL;
- }
+ migrate_del_blocker(&vbasedev->migration_blocker);
}
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 8e581575c9..17f3fc6a08 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -197,11 +197,6 @@ static int vhost_kernel_set_owner(struct vhost_dev *dev)
return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}
-static int vhost_kernel_reset_device(struct vhost_dev *dev)
-{
- return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
-}
-
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
@@ -322,7 +317,6 @@ const VhostOps kernel_ops = {
.vhost_get_features = vhost_kernel_get_features,
.vhost_set_backend_cap = vhost_kernel_set_backend_cap,
.vhost_set_owner = vhost_kernel_set_owner,
- .vhost_reset_device = vhost_kernel_reset_device,
.vhost_get_vq_index = vhost_kernel_get_vq_index,
.vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
.vhost_vsock_set_running = vhost_kernel_vsock_set_running,
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index e731b1d2ea..fc5f408f77 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -66,7 +66,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
*
* @svq: The svq
*/
-static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
+uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
return svq->num_free;
}
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 5bce67837b..19c842a15b 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -114,6 +114,7 @@ typedef struct VhostShadowVirtqueue {
bool vhost_svq_valid_features(uint64_t features, Error **errp);
+uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c
index 3d7fae3984..aff2d7eff6 100644
--- a/hw/virtio/vhost-user-gpio.c
+++ b/hw/virtio/vhost-user-gpio.c
@@ -15,7 +15,6 @@
#include "standard-headers/linux/virtio_ids.h"
#include "trace.h"
-#define REALIZE_CONNECTION_RETRIES 3
#define VHOST_NVQS 2
/* Features required from VirtIO */
@@ -290,7 +289,7 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
case CHR_EVENT_CLOSED:
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
- vu_gpio_disconnect);
+ vu_gpio_disconnect, vu_gpio_event);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@@ -365,7 +364,7 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp)
qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL,
dev, NULL, true);
- retries = REALIZE_CONNECTION_RETRIES;
+ retries = VU_REALIZE_CONN_RETRIES;
g_assert(!*errp);
do {
if (*errp) {
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 68eb1f0c99..7b42ae8aae 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -264,11 +264,6 @@ struct scrub_regions {
int fd_idx;
};
-static bool ioeventfd_enabled(void)
-{
- return !kvm_enabled() || kvm_eventfds_enabled();
-}
-
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
struct vhost_user *u = dev->opaque;
@@ -388,7 +383,7 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
* operations such as configuring device memory mappings or issuing device
* resets, which affect the whole device instead of individual VQs,
* vhost-user messages should only be sent once.
- *
+ *
* Devices with multiple vhost_devs are given an associated dev->vq_index
* so per_device requests are only sent if vq_index is 0.
*/
@@ -1073,9 +1068,95 @@ static int vhost_user_set_vring_endian(struct vhost_dev *dev,
return vhost_user_write(dev, &msg, NULL, 0);
}
+static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
+{
+ int ret;
+ VhostUserMsg msg = {
+ .hdr.request = request,
+ .hdr.flags = VHOST_USER_VERSION,
+ };
+
+ if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
+ return 0;
+ }
+
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (msg.hdr.request != request) {
+ error_report("Received unexpected msg type. Expected %d received %d",
+ request, msg.hdr.request);
+ return -EPROTO;
+ }
+
+ if (msg.hdr.size != sizeof(msg.payload.u64)) {
+ error_report("Received bad msg size.");
+ return -EPROTO;
+ }
+
+ *u64 = msg.payload.u64;
+
+ return 0;
+}
+
+static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
+{
+ if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/* Note: "msg->hdr.flags" may be modified. */
+static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg,
+ bool wait_for_reply)
+{
+ int ret;
+
+ if (wait_for_reply) {
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+ if (reply_supported) {
+ msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+ }
+
+ ret = vhost_user_write(dev, msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (wait_for_reply) {
+ uint64_t dummy;
+
+ if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
+ return process_message_reply(dev, msg);
+ }
+
+ /*
+ * We need to wait for a reply but the backend does not
+ * support replies for the command we just sent.
+ * Send VHOST_USER_GET_FEATURES which makes all backends
+ * send a reply.
+ */
+ return vhost_user_get_features(dev, &dummy);
+ }
+
+ return 0;
+}
+
static int vhost_set_vring(struct vhost_dev *dev,
unsigned long int request,
- struct vhost_vring_state *ring)
+ struct vhost_vring_state *ring,
+ bool wait_for_reply)
{
VhostUserMsg msg = {
.hdr.request = request,
@@ -1084,13 +1165,13 @@ static int vhost_set_vring(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.state),
};
- return vhost_user_write(dev, &msg, NULL, 0);
+ return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
static int vhost_user_set_vring_num(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
- return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
+ return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring, false);
}
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
@@ -1121,7 +1202,7 @@ static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
static int vhost_user_set_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
- return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
+ return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring, false);
}
static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
@@ -1139,7 +1220,21 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
.num = enable,
};
- ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ /*
+ * SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend /
+ * control plane thread via unix domain socket. Virtio requests travel
+ * from guest to vhost-user backend / data plane thread via eventfd.
+ * Even if the guest enables the ring first, and pushes its first virtio
+ * request second (conforming to the virtio spec), the data plane thread
+ * in the backend may see the virtio request before the control plane
+ * thread sees the queue enablement. This causes (in fact, requires) the
+ * data plane thread to discard the virtio request (it arrived on a
+ * seemingly disabled queue). To prevent this out-of-order delivery,
+ * don't let the guest proceed to pushing the virtio request until the
+ * backend control plane acknowledges enabling the queue -- IOW, pass
+ * wait_for_reply=true below.
+ */
+ ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state, true);
if (ret < 0) {
/*
* Restoring the previous state is likely infeasible, as well as
@@ -1218,7 +1313,7 @@ static int vhost_set_vring_file(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.u64),
};
- if (ioeventfd_enabled() && file->fd > 0) {
+ if (file->fd > 0) {
fds[fd_num++] = file->fd;
} else {
msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
@@ -1245,75 +1340,9 @@ static int vhost_user_set_vring_err(struct vhost_dev *dev,
return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}
-static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
-{
- int ret;
- VhostUserMsg msg = {
- .hdr.request = request,
- .hdr.flags = VHOST_USER_VERSION,
- };
-
- if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
- return 0;
- }
-
- ret = vhost_user_write(dev, &msg, NULL, 0);
- if (ret < 0) {
- return ret;
- }
-
- ret = vhost_user_read(dev, &msg);
- if (ret < 0) {
- return ret;
- }
-
- if (msg.hdr.request != request) {
- error_report("Received unexpected msg type. Expected %d received %d",
- request, msg.hdr.request);
- return -EPROTO;
- }
-
- if (msg.hdr.size != sizeof(msg.payload.u64)) {
- error_report("Received bad msg size.");
- return -EPROTO;
- }
-
- *u64 = msg.payload.u64;
-
- return 0;
-}
-
-static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
-{
- if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
- return -EPROTO;
- }
-
- return 0;
-}
-
-static int enforce_reply(struct vhost_dev *dev,
- const VhostUserMsg *msg)
-{
- uint64_t dummy;
-
- if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
- return process_message_reply(dev, msg);
- }
-
- /*
- * We need to wait for a reply but the backend does not
- * support replies for the command we just sent.
- * Send VHOST_USER_GET_FEATURES which makes all backends
- * send a reply.
- */
- return vhost_user_get_features(dev, &dummy);
-}
-
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
struct vhost_vring_addr *addr)
{
- int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_VRING_ADDR,
.hdr.flags = VHOST_USER_VERSION,
@@ -1321,29 +1350,13 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.addr),
};
- bool reply_supported = virtio_has_feature(dev->protocol_features,
- VHOST_USER_PROTOCOL_F_REPLY_ACK);
-
/*
* wait for a reply if logging is enabled to make sure
* backend is actually logging changes
*/
bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);
- if (reply_supported && wait_for_reply) {
- msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
- }
-
- ret = vhost_user_write(dev, &msg, NULL, 0);
- if (ret < 0) {
- return ret;
- }
-
- if (wait_for_reply) {
- return enforce_reply(dev, &msg);
- }
-
- return 0;
+ return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
@@ -1355,26 +1368,8 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
.payload.u64 = u64,
.hdr.size = sizeof(msg.payload.u64),
};
- int ret;
-
- if (wait_for_reply) {
- bool reply_supported = virtio_has_feature(dev->protocol_features,
- VHOST_USER_PROTOCOL_F_REPLY_ACK);
- if (reply_supported) {
- msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
- }
- }
-
- ret = vhost_user_write(dev, &msg, NULL, 0);
- if (ret < 0) {
- return ret;
- }
- if (wait_for_reply) {
- return enforce_reply(dev, &msg);
- }
-
- return 0;
+ return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
@@ -1482,12 +1477,17 @@ static int vhost_user_reset_device(struct vhost_dev *dev)
{
VhostUserMsg msg = {
.hdr.flags = VHOST_USER_VERSION,
+ .hdr.request = VHOST_USER_RESET_DEVICE,
};
- msg.hdr.request = virtio_has_feature(dev->protocol_features,
- VHOST_USER_PROTOCOL_F_RESET_DEVICE)
- ? VHOST_USER_RESET_DEVICE
- : VHOST_USER_RESET_OWNER;
+ /*
+ * Historically, reset was not implemented so only reset devices
+ * that are expecting it.
+ */
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_RESET_DEVICE)) {
+ return -ENOSYS;
+ }
return vhost_user_write(dev, &msg, NULL, 0);
}
@@ -2751,6 +2751,7 @@ typedef struct {
DeviceState *dev;
CharBackend *cd;
struct vhost_dev *vhost;
+ IOEventHandler *event_cb;
} VhostAsyncCallback;
static void vhost_user_async_close_bh(void *opaque)
@@ -2765,7 +2766,10 @@ static void vhost_user_async_close_bh(void *opaque)
*/
if (vhost->vdev) {
data->cb(data->dev);
- }
+ } else if (data->event_cb) {
+ qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
+ NULL, data->dev, NULL, true);
+ }
g_free(data);
}
@@ -2777,7 +2781,8 @@ static void vhost_user_async_close_bh(void *opaque)
*/
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
- vu_async_close_fn cb)
+ vu_async_close_fn cb,
+ IOEventHandler *event_cb)
{
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
/*
@@ -2793,6 +2798,7 @@ void vhost_user_async_close(DeviceState *d,
data->dev = d;
data->cd = chardev;
data->vhost = vhost;
+ data->event_cb = event_cb;
/* Disable any further notifications on the chardev */
qemu_chr_fe_set_handlers(chardev,
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 9f37206ba0..aa7b272452 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1527,9 +1527,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
}
if (hdev->migration_blocker != NULL) {
- r = migrate_add_blocker(hdev->migration_blocker, errp);
+ r = migrate_add_blocker(&hdev->migration_blocker, errp);
if (r < 0) {
- error_free(hdev->migration_blocker);
goto fail_busyloop;
}
}
@@ -1597,10 +1596,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
memory_listener_unregister(&hdev->memory_listener);
QLIST_REMOVE(hdev, entry);
}
- if (hdev->migration_blocker) {
- migrate_del_blocker(hdev->migration_blocker);
- error_free(hdev->migration_blocker);
- }
+ migrate_del_blocker(&hdev->migration_blocker);
g_free(hdev->mem);
g_free(hdev->mem_sections);
if (hdev->vhost_ops) {
@@ -2154,3 +2150,12 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
return -ENOSYS;
}
+
+int vhost_reset_device(struct vhost_dev *hdev)
+{
+ if (hdev->vhost_ops->vhost_reset_device) {
+ return hdev->vhost_ops->vhost_reset_device(hdev);
+ }
+
+ return -ENOSYS;
+}
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index c2c6d85475..22f15e1e02 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -761,10 +761,6 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL);
sysbus_init_irq(sbd, &proxy->irq);
- if (!kvm_eventfds_enabled()) {
- proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
- }
-
/* fd-based ioevents can't be synchronized in record/replay */
if (replay_mode != REPLAY_MODE_NONE) {
proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index af1f4bc187..205dbf24fb 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -332,7 +332,6 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
VirtQueue *vq = virtio_get_queue(vdev, n);
bool legacy = virtio_pci_legacy(proxy);
bool modern = virtio_pci_modern(proxy);
- bool fast_mmio = kvm_ioeventfd_any_length_enabled();
bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
MemoryRegion *modern_mr = &proxy->notify.mr;
MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
@@ -343,13 +342,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
if (assign) {
if (modern) {
- if (fast_mmio) {
- memory_region_add_eventfd(modern_mr, modern_addr, 0,
- false, n, notifier);
- } else {
- memory_region_add_eventfd(modern_mr, modern_addr, 2,
- false, n, notifier);
- }
+ memory_region_add_eventfd(modern_mr, modern_addr, 0,
+ false, n, notifier);
if (modern_pio) {
memory_region_add_eventfd(modern_notify_mr, 0, 2,
true, n, notifier);
@@ -361,13 +355,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
}
} else {
if (modern) {
- if (fast_mmio) {
- memory_region_del_eventfd(modern_mr, modern_addr, 0,
- false, n, notifier);
- } else {
- memory_region_del_eventfd(modern_mr, modern_addr, 2,
- false, n, notifier);
- }
+ memory_region_del_eventfd(modern_mr, modern_addr, 0,
+ false, n, notifier);
if (modern_pio) {
memory_region_del_eventfd(modern_notify_mr, 0, 2,
true, n, notifier);
@@ -2114,10 +2103,6 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
!pci_bus_is_root(pci_get_bus(pci_dev));
- if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
- proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
- }
-
/* fd-based ioevents can't be synchronized in record/replay */
if (replay_mode != REPLAY_MODE_NONE) {
proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c
index c3512c2dae..cc24812d2e 100644
--- a/hw/virtio/virtio-pmem.c
+++ b/hw/virtio/virtio-pmem.c
@@ -147,10 +147,7 @@ static void virtio_pmem_fill_device_info(const VirtIOPMEM *pmem,
static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem,
Error **errp)
{
- if (!pmem->memdev) {
- error_setg(errp, "'%s' property must be set", VIRTIO_PMEM_MEMDEV_PROP);
- return NULL;
- }
+ assert(pmem->memdev);
return &pmem->memdev->mr;
}
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 6facd64fbc..fb24bc927b 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2136,6 +2136,10 @@ void virtio_reset(void *opaque)
vdev->device_endian = virtio_default_endian();
}
+ if (vdev->vhost_started) {
+ vhost_reset_device(k->get_vhost(vdev));
+ }
+
if (k->reset) {
k->reset(vdev);
}
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 653a32ea10..9087d02769 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -2793,6 +2793,8 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
{
cache->mrs.mr = NULL;
+ /* There is no real need to initialize fv, but it makes Coverity happy. */
+ cache->fv = NULL;
}
/**
diff --git a/include/exec/target_long.h b/include/exec/target_long.h
index 93c9472971..3cd8e26a23 100644
--- a/include/exec/target_long.h
+++ b/include/exec/target_long.h
@@ -29,12 +29,14 @@ typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
+#define MO_TL MO_32
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
+#define MO_TL MO_64
#else
#error TARGET_LONG_SIZE undefined
#endif
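MO_TL gives target-independent code a MemOp sized like target_ulong
without TARGET_LONG_BITS conditionals. Since MO_32/MO_64 encode the
access size as log2 of the byte count, the relationship can be checked
at build time (a sketch, using the existing QEMU_BUILD_BUG_ON macro):

    /* With MO_TL a TCG frontend can, for instance, emit one load for
     * both 32- and 64-bit targets:
     *     tcg_gen_qemu_ld_tl(val, addr, mem_idx, MO_TE | MO_TL);
     */
    QEMU_BUILD_BUG_ON((1 << MO_TL) != sizeof(target_ulong));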
diff --git a/include/hw/acpi/cxl.h b/include/hw/acpi/cxl.h
index acf4418886..8f22c71530 100644
--- a/include/hw/acpi/cxl.h
+++ b/include/hw/acpi/cxl.h
@@ -25,5 +25,6 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
BIOSLinker *linker, const char *oem_id,
const char *oem_table_id, CXLState *cxl_state);
void build_cxl_osc_method(Aml *dev);
+void build_cxl_dsm_method(Aml *dev);
#endif
diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h
index ef59810c17..ac21a95913 100644
--- a/include/hw/acpi/pcihp.h
+++ b/include/hw/acpi/pcihp.h
@@ -56,7 +56,7 @@ typedef struct AcpiPciHpState {
} AcpiPciHpState;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root,
- MemoryRegion *address_space_io, uint16_t io_base);
+ MemoryRegion *io, uint16_t io_base);
bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus);
void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev,
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 8adff70072..cb832bc1ee 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -47,20 +47,14 @@
#define ASPEED_JTAG_NUM 2
struct AspeedSoCState {
- /*< private >*/
DeviceState parent;
- /*< public >*/
- ARMCPU cpu[ASPEED_CPUS_NUM];
- A15MPPrivState a7mpcore;
- ARMv7MState armv7m;
MemoryRegion *memory;
MemoryRegion *dram_mr;
MemoryRegion dram_container;
MemoryRegion sram;
MemoryRegion spi_boot_container;
MemoryRegion spi_boot;
- AspeedVICState vic;
AspeedRtcState rtc;
AspeedTimerCtrlState timerctrl;
AspeedI2CState i2c;
@@ -101,6 +95,35 @@ struct AspeedSoCState {
#define TYPE_ASPEED_SOC "aspeed-soc"
OBJECT_DECLARE_TYPE(AspeedSoCState, AspeedSoCClass, ASPEED_SOC)
+struct Aspeed2400SoCState {
+ AspeedSoCState parent;
+
+ ARMCPU cpu[ASPEED_CPUS_NUM];
+ AspeedVICState vic;
+};
+
+#define TYPE_ASPEED2400_SOC "aspeed2400-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2400SoCState, ASPEED2400_SOC)
+
+struct Aspeed2600SoCState {
+ AspeedSoCState parent;
+
+ A15MPPrivState a7mpcore;
+ ARMCPU cpu[ASPEED_CPUS_NUM]; /* XXX belong to a7mpcore */
+};
+
+#define TYPE_ASPEED2600_SOC "aspeed2600-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed2600SoCState, ASPEED2600_SOC)
+
+struct Aspeed10x0SoCState {
+ AspeedSoCState parent;
+
+ ARMv7MState armv7m;
+};
+
+#define TYPE_ASPEED10X0_SOC "aspeed10x0-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC)
+
struct AspeedSoCClass {
DeviceClass parent_class;
diff --git a/include/hw/arm/bsa.h b/include/hw/arm/bsa.h
new file mode 100644
index 0000000000..8eaab603c0
--- /dev/null
+++ b/include/hw/arm/bsa.h
@@ -0,0 +1,35 @@
+/*
+ * Common definitions for Arm Base System Architecture (BSA) platforms.
+ *
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_ARM_BSA_H
+#define QEMU_ARM_BSA_H
+
+/* These are architectural INTID values */
+#define VIRTUAL_PMU_IRQ 23
+#define ARCH_GIC_MAINT_IRQ 25
+#define ARCH_TIMER_NS_EL2_IRQ 26
+#define ARCH_TIMER_VIRT_IRQ 27
+#define ARCH_TIMER_NS_EL2_VIRT_IRQ 28
+#define ARCH_TIMER_S_EL1_IRQ 29
+#define ARCH_TIMER_NS_EL1_IRQ 30
+
+#define INTID_TO_PPI(irq) ((irq) - 16)
+
+#endif /* QEMU_ARM_BSA_H */
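Since PPIs occupy INTIDs 16-31, INTID_TO_PPI() just subtracts the PPI base. A small self-contained check of the mapping, with values copied from the header above:

#include <stdio.h>

#define ARCH_TIMER_VIRT_IRQ   27
#define ARCH_TIMER_NS_EL1_IRQ 30
#define INTID_TO_PPI(irq) ((irq) - 16)

int main(void)
{
    /* PPIs occupy INTIDs 16..31, so the PPI index is INTID - 16. */
    printf("virt timer:   INTID %d -> PPI %d\n",
           ARCH_TIMER_VIRT_IRQ, INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ));
    printf("NS EL1 timer: INTID %d -> PPI %d\n",
           ARCH_TIMER_NS_EL1_IRQ, INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ));
    return 0;
}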
diff --git a/include/hw/arm/exynos4210.h b/include/hw/arm/exynos4210.h
index 68db19f0cb..d33fe38586 100644
--- a/include/hw/arm/exynos4210.h
+++ b/include/hw/arm/exynos4210.h
@@ -30,7 +30,7 @@
#include "hw/intc/exynos4210_gic.h"
#include "hw/intc/exynos4210_combiner.h"
#include "hw/core/split-irq.h"
-#include "target/arm/cpu-qom.h"
+#include "hw/arm/boot.h"
#include "qom/object.h"
#define EXYNOS4210_NCPUS 2
diff --git a/include/hw/misc/raspberrypi-fw-defs.h b/include/hw/arm/raspberrypi-fw-defs.h
index 4551fe7450..4551fe7450 100644
--- a/include/hw/misc/raspberrypi-fw-defs.h
+++ b/include/hw/arm/raspberrypi-fw-defs.h
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index e1ddbea96b..f69239850e 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -34,6 +34,7 @@
#include "qemu/notify.h"
#include "hw/boards.h"
#include "hw/arm/boot.h"
+#include "hw/arm/bsa.h"
#include "hw/block/flash.h"
#include "sysemu/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
@@ -43,17 +44,6 @@
#define NUM_VIRTIO_TRANSPORTS 32
#define NUM_SMMU_IRQS 4
-#define ARCH_GIC_MAINT_IRQ 9
-
-#define ARCH_TIMER_VIRT_IRQ 11
-#define ARCH_TIMER_S_EL1_IRQ 13
-#define ARCH_TIMER_NS_EL1_IRQ 14
-#define ARCH_TIMER_NS_EL2_IRQ 10
-
-#define VIRTUAL_PMU_IRQ 7
-
-#define PPI(irq) ((irq) + 16)
-
/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */
#define PVTIME_SIZE_PER_CPU 64
diff --git a/include/hw/audio/pcspk.h b/include/hw/audio/pcspk.h
index 9506179587..6be75a6b86 100644
--- a/include/hw/audio/pcspk.h
+++ b/include/hw/audio/pcspk.h
@@ -25,16 +25,6 @@
#ifndef HW_PCSPK_H
#define HW_PCSPK_H
-#include "hw/isa/isa.h"
-#include "hw/qdev-properties.h"
-#include "qapi/error.h"
-
#define TYPE_PC_SPEAKER "isa-pcspk"
-static inline void pcspk_init(ISADevice *isadev, ISABus *bus, ISADevice *pit)
-{
- object_property_set_link(OBJECT(isadev), "pit", OBJECT(pit), NULL);
- isa_realize_and_unref(isadev, bus, &error_fatal);
-}
-
#endif /* HW_PCSPK_H */
diff --git a/include/hw/boards.h b/include/hw/boards.h
index 43a56dc51e..a735999298 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -135,12 +135,16 @@ typedef struct {
* @clusters_supported - whether clusters are supported by the machine
* @has_clusters - whether clusters are explicitly specified in the user
* provided SMP configuration
+ * @books_supported - whether books are supported by the machine
+ * @drawers_supported - whether drawers are supported by the machine
*/
typedef struct {
bool prefer_sockets;
bool dies_supported;
bool clusters_supported;
bool has_clusters;
+ bool books_supported;
+ bool drawers_supported;
} SMPCompatProps;
/**
@@ -323,7 +327,9 @@ typedef struct DeviceMemoryState {
/**
* CpuTopology:
* @cpus: the number of present logical processors on the machine
- * @sockets: the number of sockets on the machine
+ * @drawers: the number of drawers on the machine
+ * @books: the number of books in one drawer
+ * @sockets: the number of sockets in one book
* @dies: the number of dies in one socket
* @clusters: the number of clusters in one die
* @cores: the number of cores in one cluster
@@ -332,6 +338,8 @@ typedef struct DeviceMemoryState {
*/
typedef struct CpuTopology {
unsigned int cpus;
+ unsigned int drawers;
+ unsigned int books;
unsigned int sockets;
unsigned int dies;
unsigned int clusters;
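For context (the hunk above is cut off before @threads), the total logical CPU count is the product of all topology levels. An illustrative standalone computation with made-up counts:

#include <stdio.h>

int main(void)
{
    /* Hypothetical topology; threads included even though the hunk
     * above does not show that field. */
    unsigned drawers = 2, books = 2, sockets = 2;
    unsigned dies = 1, clusters = 1, cores = 4, threads = 1;
    unsigned cpus = drawers * books * sockets * dies
                    * clusters * cores * threads;

    printf("logical CPUs: %u\n", cpus);   /* 32 */
    return 0;
}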
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 3968369554..18593db5b2 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -618,8 +618,10 @@ bool cpu_paging_enabled(const CPUState *cpu);
* @cpu: The CPU whose memory mappings are to be obtained.
* @list: Where to write the memory mappings to.
* @errp: Pointer for reporting an #Error.
+ *
+ * Returns: %true on success, %false otherwise.
*/
-void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
#if !defined(CONFIG_USER_ONLY)
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index ee169b872c..24d003fe04 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -19,7 +19,7 @@ typedef struct SysemuCPUOps {
/**
* @get_memory_mapping: Callback for obtaining the memory mappings.
*/
- void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
+ bool (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
Error **errp);
/**
* @get_paging_enabled: Callback for inquiring whether paging is enabled.
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index bec38cb92c..29a9724524 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -42,6 +42,7 @@ typedef struct PCMachineState {
uint64_t max_ram_below_4g;
OnOffAuto vmport;
SmbiosEntryPointType smbios_entry_point_type;
+ const char *south_bridge;
bool acpi_build_enabled;
bool smbus_enabled;
@@ -92,6 +93,7 @@ struct PCMachineClass {
/* Device configuration: */
bool pci_enabled;
bool kvmclock_enabled;
+ const char *default_south_bridge;
/* Compat options: */
diff --git a/include/hw/mips/cpudevs.h b/include/hw/mips/cpudevs.h
deleted file mode 100644
index f7c9728fa9..0000000000
--- a/include/hw/mips/cpudevs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef HW_MIPS_CPUDEVS_H
-#define HW_MIPS_CPUDEVS_H
-
-#include "target/mips/cpu-qom.h"
-
-/* Definitions for MIPS CPU internal devices. */
-
-/* mips_int.c */
-void cpu_mips_irq_init_cpu(MIPSCPU *cpu);
-
-/* mips_timer.c */
-void cpu_mips_clock_init(MIPSCPU *cpu);
-
-#endif
diff --git a/include/hw/misc/mips_itu.h b/include/hw/misc/mips_itu.h
index 35218b2d14..5caed6cc36 100644
--- a/include/hw/misc/mips_itu.h
+++ b/include/hw/misc/mips_itu.h
@@ -73,10 +73,12 @@ struct MIPSITUState {
/* SAAR */
uint64_t *saar;
- MIPSCPU *cpu0;
+ ArchCPU *cpu0;
};
/* Get ITC Configuration Tag memory region. */
MemoryRegion *mips_itu_get_tag_region(MIPSITUState *itu);
+void itc_reconfigure(struct MIPSITUState *tag);
+
#endif /* MIPS_ITU_H */
diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h
index 87d59ef3c0..6fc13f8cc1 100644
--- a/include/hw/nvram/xlnx-bbram.h
+++ b/include/hw/nvram/xlnx-bbram.h
@@ -34,7 +34,7 @@
#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1)
-#define TYPE_XLNX_BBRAM "xlnx,bbram-ctrl"
+#define TYPE_XLNX_BBRAM "xlnx.bbram-ctrl"
OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM);
struct XlnxBBRam {
diff --git a/include/hw/pci-host/astro.h b/include/hw/pci-host/astro.h
new file mode 100644
index 0000000000..f63fd220f3
--- /dev/null
+++ b/include/hw/pci-host/astro.h
@@ -0,0 +1,92 @@
+/*
+ * HP-PARISC Astro Bus connector with Elroy PCI host bridges
+ */
+
+#ifndef ASTRO_H
+#define ASTRO_H
+
+#include "hw/pci/pci_host.h"
+
+#define ASTRO_HPA 0xfed00000
+
+#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
+
+#define TYPE_ASTRO_CHIP "astro-chip"
+OBJECT_DECLARE_SIMPLE_TYPE(AstroState, ASTRO_CHIP)
+
+#define TYPE_ELROY_PCI_HOST_BRIDGE "elroy-pcihost"
+OBJECT_DECLARE_SIMPLE_TYPE(ElroyState, ELROY_PCI_HOST_BRIDGE)
+
+#define ELROY_NUM 4 /* # of Elroys */
+#define ELROY_IRQS 8 /* IOSAPIC IRQs */
+
+/* ASTRO Memory and I/O regions */
+#define LMMIO_DIST_BASE_ADDR 0xf4000000ULL
+#define LMMIO_DIST_BASE_SIZE 0x4000000ULL
+
+#define IOS_DIST_BASE_ADDR 0xfffee00000ULL
+#define IOS_DIST_BASE_SIZE 0x10000ULL
+
+struct AstroState;
+
+struct ElroyState {
+ PCIHostState parent_obj;
+
+ /* parent Astro device */
+ struct AstroState *astro;
+
+ /* HPA of this Elroy */
+ hwaddr hpa;
+
+ /* PCI bus number (Elroy number) */
+ unsigned int pci_bus_num;
+
+ uint64_t config_address;
+ uint64_t config_reg_elroy;
+
+ uint64_t status_control;
+ uint64_t arb_mask;
+ uint64_t mmio_base[(0x0250 - 0x200) / 8];
+ uint64_t error_config;
+
+ uint32_t iosapic_reg_select;
+ uint64_t iosapic_reg[0x20];
+
+ uint32_t ilr;
+
+ MemoryRegion this_mem;
+
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_mmio_alias;
+ MemoryRegion pci_hole;
+ MemoryRegion pci_io;
+};
+
+struct AstroState {
+ PCIHostState parent_obj;
+
+ uint64_t ioc_ctrl;
+ uint64_t ioc_status_ctrl;
+ uint64_t ioc_ranges[(0x03d8 - 0x300) / 8];
+ uint64_t ioc_rope_config;
+ uint64_t ioc_status_control;
+ uint64_t ioc_flush_control;
+ uint64_t ioc_rope_control[8];
+ uint64_t tlb_ibase;
+ uint64_t tlb_imask;
+ uint64_t tlb_pcom;
+ uint64_t tlb_tcnfg;
+ uint64_t tlb_pdir_base;
+
+ struct ElroyState *elroy[ELROY_NUM];
+
+ MemoryRegion this_mem;
+
+ MemoryRegion pci_mmio;
+ MemoryRegion pci_io;
+
+ IOMMUMemoryRegion iommu;
+ AddressSpace iommu_as;
+};
+
+#endif
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index b70a0b95ff..ea5aff118b 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -279,12 +279,10 @@ bool pci_bus_is_express(const PCIBus *bus);
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, const char *typename);
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, const char *typename);
void pci_root_bus_cleanup(PCIBus *bus);
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
@@ -304,8 +302,7 @@ int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
void *irq_opaque,
- MemoryRegion *address_space_mem,
- MemoryRegion *address_space_io,
+ MemoryRegion *mem, MemoryRegion *io,
uint8_t devfn_min, int nirq,
const char *typename);
void pci_unregister_root_bus(PCIBus *bus);
diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h
index 85469b9b53..f1a53fea8d 100644
--- a/include/hw/pci/pci_ids.h
+++ b/include/hw/pci/pci_ids.h
@@ -179,6 +179,8 @@
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#define PCI_VENDOR_ID_HP 0x103c
+
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_VENDOR_ID_MOTOROLA 0x1057
diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h
index 9bc6463547..35b19610f7 100644
--- a/include/hw/ppc/pnv_xscom.h
+++ b/include/hw/ppc/pnv_xscom.h
@@ -170,7 +170,7 @@ struct PnvXScomInterfaceClass {
#define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... */
#define PNV10_XSCOM_PEC_PCI_SIZE 0x200
-void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp);
+void pnv_xscom_init(PnvChip *chip, uint64_t size, hwaddr addr);
int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset,
uint64_t xscom_base, uint64_t xscom_size,
const char *compat, int compat_size);
diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h
index 0ac327ae60..e4f8a13afc 100644
--- a/include/hw/qdev-properties-system.h
+++ b/include/hw/qdev-properties-system.h
@@ -22,6 +22,7 @@ extern const PropertyInfo qdev_prop_audiodev;
extern const PropertyInfo qdev_prop_off_auto_pcibar;
extern const PropertyInfo qdev_prop_pcie_link_speed;
extern const PropertyInfo qdev_prop_pcie_link_width;
+extern const PropertyInfo qdev_prop_cpus390entitlement;
#define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \
DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t)
@@ -73,5 +74,8 @@ extern const PropertyInfo qdev_prop_pcie_link_width;
#define DEFINE_PROP_UUID_NODEFAULT(_name, _state, _field) \
DEFINE_PROP(_name, _state, _field, qdev_prop_uuid, QemuUUID)
+#define DEFINE_PROP_CPUS390ENTITLEMENT(_n, _s, _f, _d) \
+ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \
+ CpuS390Entitlement)
#endif
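A device using the new property macro might declare it as below. This is a hedged sketch only: the struct, field name, and default constant are assumptions for illustration, not taken from this patch.

/* Hypothetical usage sketch; field and default are assumptions. */
static Property s390_cpu_properties[] = {
    DEFINE_PROP_CPUS390ENTITLEMENT("entitlement", S390CPU,
                                   env.entitlement,
                                   S390_CPU_ENTITLEMENT_AUTO),
    DEFINE_PROP_END_OF_LIST(),
};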
diff --git a/include/hw/s390x/cpu-topology.h b/include/hw/s390x/cpu-topology.h
new file mode 100644
index 0000000000..c064f427e9
--- /dev/null
+++ b/include/hw/s390x/cpu-topology.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CPU Topology
+ *
+ * Copyright IBM Corp. 2022, 2023
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+#ifndef HW_S390X_CPU_TOPOLOGY_H
+#define HW_S390X_CPU_TOPOLOGY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include "qemu/queue.h"
+#include "hw/boards.h"
+#include "qapi/qapi-types-machine-target.h"
+
+#define S390_TOPOLOGY_CPU_IFL 0x03
+
+typedef struct S390TopologyId {
+ uint8_t sentinel;
+ uint8_t drawer;
+ uint8_t book;
+ uint8_t socket;
+ uint8_t type;
+ uint8_t vertical:1;
+ uint8_t entitlement:2;
+ uint8_t dedicated;
+ uint8_t origin;
+} S390TopologyId;
+
+typedef struct S390TopologyEntry {
+ QTAILQ_ENTRY(S390TopologyEntry) next;
+ S390TopologyId id;
+ uint64_t mask;
+} S390TopologyEntry;
+
+typedef struct S390Topology {
+ uint8_t *cores_per_socket;
+ CpuS390Polarization polarization;
+} S390Topology;
+
+typedef QTAILQ_HEAD(, S390TopologyEntry) S390TopologyList;
+
+#ifdef CONFIG_KVM
+bool s390_has_topology(void);
+void s390_topology_setup_cpu(MachineState *ms, S390CPU *cpu, Error **errp);
+void s390_topology_reset(void);
+#else
+static inline bool s390_has_topology(void)
+{
+ return false;
+}
+static inline void s390_topology_setup_cpu(MachineState *ms,
+ S390CPU *cpu,
+ Error **errp) {}
+static inline void s390_topology_reset(void)
+{
+ /* Unreachable, CPU topology not implemented for TCG */
+ assert(false);
+}
+#endif
+
+extern S390Topology s390_topology;
+
+static inline int s390_std_socket(int n, CpuTopology *smp)
+{
+ return (n / smp->cores) % smp->sockets;
+}
+
+static inline int s390_std_book(int n, CpuTopology *smp)
+{
+ return (n / (smp->cores * smp->sockets)) % smp->books;
+}
+
+static inline int s390_std_drawer(int n, CpuTopology *smp)
+{
+ return (n / (smp->cores * smp->sockets * smp->books)) % smp->drawers;
+}
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif
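The s390_std_* helpers convert a linear core index into per-level coordinates. A standalone illustration with a trimmed stand-in for CpuTopology (the counts are made up):

#include <stdio.h>

struct topo { int cores, sockets, books, drawers; };

static int std_socket(int n, const struct topo *t)
{
    return (n / t->cores) % t->sockets;
}

static int std_book(int n, const struct topo *t)
{
    return (n / (t->cores * t->sockets)) % t->books;
}

static int std_drawer(int n, const struct topo *t)
{
    return (n / (t->cores * t->sockets * t->books)) % t->drawers;
}

int main(void)
{
    struct topo t = { .cores = 2, .sockets = 2, .books = 2, .drawers = 2 };

    for (int n = 0; n < 16; n++) {
        printf("cpu %2d -> drawer %d book %d socket %d\n",
               n, std_drawer(n, &t), std_book(n, &t), std_socket(n, &t));
    }
    return 0;
}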
diff --git a/include/hw/s390x/s390-virtio-ccw.h b/include/hw/s390x/s390-virtio-ccw.h
index 9bba21a916..c1d46e78af 100644
--- a/include/hw/s390x/s390-virtio-ccw.h
+++ b/include/hw/s390x/s390-virtio-ccw.h
@@ -30,6 +30,12 @@ struct S390CcwMachineState {
uint8_t loadparm[8];
};
+#define S390_PTF_REASON_NONE (0x00 << 8)
+#define S390_PTF_REASON_DONE (0x01 << 8)
+#define S390_PTF_REASON_BUSY (0x02 << 8)
+#define S390_TOPO_FC_MASK 0xffUL
+void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra);
+
struct S390CcwMachineClass {
/*< private >*/
MachineClass parent_class;
diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h
index cf1f2efae2..9aef6d9370 100644
--- a/include/hw/s390x/sclp.h
+++ b/include/hw/s390x/sclp.h
@@ -112,11 +112,13 @@ typedef struct CPUEntry {
} QEMU_PACKED CPUEntry;
#define SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET 128
+#define SCLP_READ_SCP_INFO_MNEST 4
typedef struct ReadInfo {
SCCBHeader h;
uint16_t rnmax;
uint8_t rnsize;
- uint8_t _reserved1[16 - 11]; /* 11-15 */
+ uint8_t _reserved1[15 - 11]; /* 11-14 */
+ uint8_t stsi_parm; /* 15-15 */
uint16_t entries_cpu; /* 16-17 */
uint16_t offset_cpu; /* 18-19 */
uint8_t _reserved2[24 - 20]; /* 20-23 */
diff --git a/include/hw/southbridge/piix.h b/include/hw/southbridge/piix.h
index 278171752f..86709ba2e4 100644
--- a/include/hw/southbridge/piix.h
+++ b/include/hw/southbridge/piix.h
@@ -13,7 +13,10 @@
#define HW_SOUTHBRIDGE_PIIX_H
#include "hw/pci/pci_device.h"
+#include "hw/acpi/piix4.h"
+#include "hw/ide/pci.h"
#include "hw/rtc/mc146818rtc.h"
+#include "hw/usb/hcd-uhci.h"
/* PIRQRC[A:D]: PIRQx Route Control Registers */
#define PIIX_PIRQCA 0x60
@@ -27,7 +30,6 @@
*/
#define PIIX_RCR_IOPORT 0xcf9
-#define PIIX_NUM_PIC_IRQS 16 /* i8259 * 2 */
#define PIIX_NUM_PIRQS 4ULL /* PIRQ[A-D] */
struct PIIXState {
@@ -39,32 +41,42 @@ struct PIIXState {
* So one PIC level is tracked by PIIX_NUM_PIRQS bits.
*
* PIRQ is mapped to PIC pins, we track it by
- * PIIX_NUM_PIRQS * PIIX_NUM_PIC_IRQS = 64 bits with
+ * PIIX_NUM_PIRQS * ISA_NUM_IRQS = 64 bits with
* pic_irq * PIIX_NUM_PIRQS + pirq
*/
-#if PIIX_NUM_PIC_IRQS * PIIX_NUM_PIRQS > 64
+#if ISA_NUM_IRQS * PIIX_NUM_PIRQS > 64
#error "unable to encode pic state in 64bit in pic_levels."
#endif
uint64_t pic_levels;
- qemu_irq *pic;
+ qemu_irq cpu_intr;
+ qemu_irq isa_irqs_in[ISA_NUM_IRQS];
/* This member isn't used. Just for save/load compatibility */
int32_t pci_irq_levels_vmstate[PIIX_NUM_PIRQS];
MC146818RtcState rtc;
+ PCIIDEState ide;
+ UHCIState uhci;
+ PIIX4PMState pm;
+
+ uint32_t smb_io_base;
/* Reset Control Register contents */
uint8_t rcr;
/* IO memory region for Reset Control Register (PIIX_RCR_IOPORT) */
MemoryRegion rcr_mem;
+
+ bool has_acpi;
+ bool has_pic;
+ bool has_pit;
+ bool has_usb;
+ bool smm_enabled;
};
-typedef struct PIIXState PIIX3State;
-#define TYPE_PIIX3_PCI_DEVICE "pci-piix3"
-DECLARE_INSTANCE_CHECKER(PIIX3State, PIIX3_PCI_DEVICE,
- TYPE_PIIX3_PCI_DEVICE)
+#define TYPE_PIIX_PCI_DEVICE "pci-piix"
+OBJECT_DECLARE_SIMPLE_TYPE(PIIXState, PIIX_PCI_DEVICE)
#define TYPE_PIIX3_DEVICE "PIIX3"
#define TYPE_PIIX4_PCI_DEVICE "piix4-isa"
diff --git a/include/hw/virtio/vhost-scsi-common.h b/include/hw/virtio/vhost-scsi-common.h
index 18f115527c..c5d2c09455 100644
--- a/include/hw/virtio/vhost-scsi-common.h
+++ b/include/hw/virtio/vhost-scsi-common.h
@@ -39,7 +39,7 @@ struct VHostSCSICommon {
struct vhost_inflight *inflight;
};
-int vhost_scsi_common_start(VHostSCSICommon *vsc);
+int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp);
void vhost_scsi_common_stop(VHostSCSICommon *vsc);
char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);
diff --git a/include/hw/virtio/vhost-user-scsi.h b/include/hw/virtio/vhost-user-scsi.h
index 521b08e559..78fe616ccb 100644
--- a/include/hw/virtio/vhost-user-scsi.h
+++ b/include/hw/virtio/vhost-user-scsi.h
@@ -28,7 +28,13 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCSI, VHOST_USER_SCSI)
struct VHostUserSCSI {
VHostSCSICommon parent_obj;
+
+ /* Properties */
+ bool connected;
+ bool started_vu;
+
VhostUserState vhost_user;
+ struct vhost_virtqueue *vhost_vqs;
};
#endif /* VHOST_USER_SCSI_H */
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index 9f9ddf878d..20b69d8e85 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -29,7 +29,8 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
VHOST_USER_PROTOCOL_F_STATUS = 16,
- VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 17,
+ /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
+ VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
VHOST_USER_PROTOCOL_F_MAX
};
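Renumbering matters because both ends of the protocol test the same bit position. A minimal sketch of the check a back-end might do (the enum value is copied from the hunk; the helper function is an assumption):

#include <stdint.h>

enum { VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18 };  /* was 17 */

static int shared_object_supported(uint64_t protocol_features)
{
    /* Bit 17 stays reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
    return (protocol_features
            & (1ULL << VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) != 0;
}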
@@ -106,6 +107,7 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
- vu_async_close_fn cb);
+ vu_async_close_fn cb,
+ IOEventHandler *event_cb);
#endif
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index c7e5467693..5e8183f64a 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -8,6 +8,8 @@
#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VU_REALIZE_CONN_RETRIES 3
+
/* Generic structures common for any vhost based device. */
struct vhost_inflight {
@@ -339,4 +341,14 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
struct vhost_inflight *inflight);
bool vhost_dev_has_iommu(struct vhost_dev *dev);
+
+#ifdef CONFIG_VHOST
+int vhost_reset_device(struct vhost_dev *hdev);
+#else
+static inline int vhost_reset_device(struct vhost_dev *hdev)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_VHOST */
+
#endif
diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h
index 08f1591424..a6c9703644 100644
--- a/include/hw/virtio/virtio-input.h
+++ b/include/hw/virtio/virtio-input.h
@@ -84,7 +84,7 @@ struct VirtIOInputHID {
VirtIOInput parent_obj;
char *display;
uint32_t head;
- QemuInputHandler *handler;
+ const QemuInputHandler *handler;
QemuInputHandlerState *hs;
int ledstate;
bool wheel_axis;
diff --git a/include/migration/blocker.h b/include/migration/blocker.h
index 9cebe2ba06..b048f301b4 100644
--- a/include/migration/blocker.h
+++ b/include/migration/blocker.h
@@ -17,19 +17,23 @@
/**
* @migrate_add_blocker - prevent migration from proceeding
*
- * @reason - an error to be returned whenever migration is attempted
+ * @reasonp - address of an error to be returned whenever migration is attempted
*
* @errp - [out] The reason (if any) we cannot block migration right now.
*
* @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
*/
-int migrate_add_blocker(Error *reason, Error **errp);
+int migrate_add_blocker(Error **reasonp, Error **errp);
/**
* @migrate_add_blocker_internal - prevent migration from proceeding without
* only-migrate implications
*
- * @reason - an error to be returned whenever migration is attempted
+ * @reasonp - address of an error to be returned whenever migration is attempted
*
* @errp - [out] The reason (if any) we cannot block migration right now.
*
@@ -38,14 +42,20 @@ int migrate_add_blocker(Error *reason, Error **errp);
* Some of the migration blockers can be temporary (e.g., for a few seconds),
* so it shouldn't need to conflict with "-only-migratable". For those cases,
* we can call this function rather than @migrate_add_blocker().
+ *
+ * *@reasonp is freed and set to NULL if failure is returned.
+ * On success, the caller must not free @reasonp, except by
+ * calling migrate_del_blocker.
*/
-int migrate_add_blocker_internal(Error *reason, Error **errp);
+int migrate_add_blocker_internal(Error **reasonp, Error **errp);
/**
- * @migrate_del_blocker - remove a blocking error from migration
+ * @migrate_del_blocker - remove a blocking error from migration and free it.
+ *
+ * @reasonp - address of the error blocking migration
*
- * @reason - the error blocking migration
+ * This function frees *@reasonp and sets it to NULL.
*/
-void migrate_del_blocker(Error *reason);
+void migrate_del_blocker(Error **reasonp);
#endif
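With the new Error ** convention, the migration core owns the blocker once added. A hedged usage sketch, within QEMU context (the message text and function names around the calls are made up):

/* Hedged usage sketch of the new Error ** ownership convention. */
static Error *my_blocker;   /* assumed caller-side storage */

static int block_migration_example(Error **errp)
{
    error_setg(&my_blocker, "device X does not support migration");
    if (migrate_add_blocker(&my_blocker, errp) < 0) {
        /* On failure, my_blocker was freed and set to NULL for us. */
        return -1;
    }
    return 0;
}

static void unblock_migration_example(void)
{
    /* Frees *reasonp and resets it to NULL. */
    migrate_del_blocker(&my_blocker);
}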
diff --git a/include/migration/misc.h b/include/migration/misc.h
index 7dcc0b5c2c..673ac490fb 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -60,8 +60,10 @@ void migration_object_init(void);
void migration_shutdown(void);
bool migration_is_idle(void);
bool migration_is_active(MigrationState *);
-void add_migration_state_change_notifier(Notifier *notify);
-void remove_migration_state_change_notifier(Notifier *notify);
+void migration_add_notifier(Notifier *notify,
+ void (*func)(Notifier *notifier, void *data));
+void migration_remove_notifier(Notifier *notify);
+void migration_call_notifiers(MigrationState *s);
bool migration_in_setup(MigrationState *);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
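A hedged sketch of registering with the new API, assuming, from the migration_call_notifiers(MigrationState *) prototype above, that the notifier's data argument is the MigrationState (callback body is illustrative):

/* Hedged sketch; QEMU-context code, not standalone. */
static Notifier migration_state_notifier;   /* assumed storage */

static void on_migration_state_change(Notifier *notifier, void *data)
{
    MigrationState *s = data;   /* assumed: passed by migration_call_notifiers() */
    (void)s;
    /* react to setup/active/completed/failed transitions here */
}

static void register_example(void)
{
    migration_add_notifier(&migration_state_notifier,
                           on_migration_state_change);
}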
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 97a8a4f201..80b69d88f6 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -36,15 +36,11 @@ extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
-extern bool kvm_eventfds_allowed;
-extern bool kvm_irqfds_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
-extern bool kvm_direct_msi_allowed;
-extern bool kvm_ioeventfd_any_length_allowed;
extern bool kvm_msi_use_devid;
#define kvm_enabled() (kvm_allowed)
@@ -89,22 +85,15 @@ extern bool kvm_msi_use_devid;
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)
/**
- * kvm_eventfds_enabled:
- *
- * Returns: true if we can use eventfds to receive notifications
- * from a KVM CPU (ie the kernel supports eventds and we are running
- * with a configuration where it is meaningful to use them).
- */
-#define kvm_eventfds_enabled() (kvm_eventfds_allowed)
-
-/**
* kvm_irqfds_enabled:
*
* Returns: true if we can use irqfds to inject interrupts into
* a KVM CPU (ie the kernel supports irqfds and we are running
* with a configuration where it is meaningful to use them).
+ *
+ * Always available if running with in-kernel irqchip.
*/
-#define kvm_irqfds_enabled() (kvm_irqfds_allowed)
+#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()
/**
* kvm_resamplefds_enabled:
@@ -148,19 +137,6 @@ extern bool kvm_msi_use_devid;
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)
/**
- * kvm_direct_msi_enabled:
- *
- * Returns: true if KVM allows direct MSI injection.
- */
-#define kvm_direct_msi_enabled() (kvm_direct_msi_allowed)
-
-/**
- * kvm_ioeventfd_any_length_enabled:
- * Returns: true if KVM allows any length io eventfd.
- */
-#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)
-
-/**
* kvm_msi_devid_required:
* Returns: true if KVM requires a device id to be provided while
* defining an MSI routing entry.
@@ -174,15 +150,12 @@ extern bool kvm_msi_use_devid;
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
-#define kvm_eventfds_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_allowed() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
-#define kvm_direct_msi_enabled() (false)
-#define kvm_ioeventfd_any_length_enabled() (false)
#define kvm_msi_devid_required() (false)
#endif /* CONFIG_KVM_IS_POSSIBLE */
@@ -219,12 +192,8 @@ unsigned int kvm_get_max_memslots(void);
unsigned int kvm_get_free_memslots(void);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
-int kvm_has_robust_singlestep(void);
-int kvm_has_debugregs(void);
int kvm_max_nested_state_length(void);
-int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
-int kvm_has_intx_set_mask(void);
/**
* kvm_arm_supports_user_irq
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index 075939a3c4..fd846394be 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -78,14 +78,10 @@ struct KVMState
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
bool coalesced_flush_in_progress;
int vcpu_events;
- int robust_singlestep;
- int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
int max_nested_state_len;
- int many_ioeventfds;
- int intx_set_mask;
int kvm_shadow_mem;
bool kernel_irqchip_allowed;
bool kernel_irqchip_required;
@@ -103,7 +99,6 @@ struct KVMState
int nr_allocated_irq_routes;
unsigned long *used_gsi_bitmap;
unsigned int gsi_count;
- QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
KVMMemoryListener memory_listener;
QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 3bbeb1bcb4..021e0a6230 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -71,7 +71,7 @@ void guest_phys_blocks_free(GuestPhysBlockList *list);
void guest_phys_blocks_init(GuestPhysBlockList *list);
void guest_phys_blocks_append(GuestPhysBlockList *list);
-void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+bool qemu_get_guest_memory_mapping(MemoryMappingList *list,
const GuestPhysBlockList *guest_phys_blocks,
Error **errp);
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
index 2048f92b5e..677aea6dd1 100644
--- a/include/tcg/tcg-op-common.h
+++ b/include/tcg/tcg-op-common.h
@@ -346,6 +346,8 @@ void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
TCGv_i32 arg1, int32_t arg2);
void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2);
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
@@ -359,6 +361,7 @@ void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc);
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags);
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg);
@@ -544,6 +547,8 @@ void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
TCGv_i64 arg1, int64_t arg2);
void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_negsetcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2);
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
@@ -560,6 +565,7 @@ void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc);
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
@@ -747,6 +753,9 @@ void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src);
void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg);
void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi);
+void tcg_gen_ld_i128(TCGv_i128 ret, TCGv_ptr base, tcg_target_long offset);
+void tcg_gen_st_i128(TCGv_i128 val, TCGv_ptr base, tcg_target_long offset);
+
static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
{
tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 80cfcf8104..a02850583b 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -52,7 +52,6 @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
typedef TCGv_i32 TCGv;
#define tcg_temp_new() tcg_temp_new_i32()
#define tcg_global_mem_new tcg_global_mem_new_i32
-#define tcg_temp_free tcg_temp_free_i32
#define tcgv_tl_temp tcgv_i32_temp
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
@@ -60,7 +59,6 @@ typedef TCGv_i32 TCGv;
typedef TCGv_i64 TCGv;
#define tcg_temp_new() tcg_temp_new_i64()
#define tcg_global_mem_new tcg_global_mem_new_i64
-#define tcg_temp_free tcg_temp_free_i64
#define tcgv_tl_temp tcgv_i64_temp
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
@@ -201,6 +199,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
#define tcg_gen_setcond_tl tcg_gen_setcond_i64
#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64
#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i64
+#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i64
#define tcg_gen_mul_tl tcg_gen_mul_i64
#define tcg_gen_muli_tl tcg_gen_muli_i64
#define tcg_gen_div_tl tcg_gen_div_i64
@@ -220,6 +219,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64
#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64
#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64
+#define tcg_gen_ext_tl tcg_gen_ext_i64
#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64
#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64
#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64
@@ -319,6 +319,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
#define tcg_gen_setcond_tl tcg_gen_setcond_i32
#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32
#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i32
+#define tcg_gen_negsetcondi_tl tcg_gen_negsetcondi_i32
#define tcg_gen_mul_tl tcg_gen_mul_i32
#define tcg_gen_muli_tl tcg_gen_muli_i32
#define tcg_gen_div_tl tcg_gen_div_i32
@@ -338,6 +339,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32
#define tcg_gen_ext32u_tl tcg_gen_mov_i32
#define tcg_gen_ext32s_tl tcg_gen_mov_i32
+#define tcg_gen_ext_tl tcg_gen_ext_i32
#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32
#define tcg_gen_bswap32_tl(D, S, F) tcg_gen_bswap32_i32(D, S)
#define tcg_gen_bswap_tl tcg_gen_bswap32_i32
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 680ff00722..a9282cdcc6 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -488,11 +488,9 @@ struct TCGContext {
int nb_ops;
TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
-#ifdef CONFIG_SOFTMMU
int page_mask;
uint8_t page_bits;
uint8_t tlb_dyn_max_bits;
-#endif
uint8_t insn_start_words;
TCGBar guest_mo;
@@ -573,6 +571,12 @@ static inline bool temp_readonly(TCGTemp *ts)
return ts->kind >= TEMP_FIXED;
}
+#ifdef CONFIG_USER_ONLY
+extern bool tcg_use_softmmu;
+#else
+#define tcg_use_softmmu true
+#endif
+
extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
diff --git a/include/ui/input.h b/include/ui/input.h
index 24d8e4579e..8f9aac562e 100644
--- a/include/ui/input.h
+++ b/include/ui/input.h
@@ -30,7 +30,7 @@ struct QemuInputHandler {
};
QemuInputHandlerState *qemu_input_handler_register(DeviceState *dev,
- QemuInputHandler *handler);
+ const QemuInputHandler *handler);
void qemu_input_handler_activate(QemuInputHandlerState *s);
void qemu_input_handler_deactivate(QemuInputHandlerState *s);
void qemu_input_handler_unregister(QemuInputHandlerState *s);
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index f21e2e0c3d..2e3809f03c 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1237,6 +1237,14 @@ static uint32_t get_elf_hwcap(void)
hwcaps |= HWCAP_LOONGARCH_LAM;
}
+ if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
+ hwcaps |= HWCAP_LOONGARCH_LSX;
+ }
+
+ if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
+ hwcaps |= HWCAP_LOONGARCH_LASX;
+ }
+
return hwcaps;
}
@@ -2362,31 +2370,58 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
* Map and zero the bss. We need to explicitly zero any fractional pages
* after the data section (i.e. bss). Return false on mapping failure.
*/
-static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, int prot)
+static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss,
+ int prot, Error **errp)
{
abi_ulong align_bss;
+ /* We only expect writable bss; the code segment shouldn't need this. */
+ if (!(prot & PROT_WRITE)) {
+ error_setg(errp, "PT_LOAD with non-writable bss");
+ return false;
+ }
+
align_bss = TARGET_PAGE_ALIGN(start_bss);
end_bss = TARGET_PAGE_ALIGN(end_bss);
if (start_bss < align_bss) {
int flags = page_get_flags(start_bss);
- if (!(flags & PAGE_VALID)) {
- /* Map the start of the bss. */
+ if (!(flags & PAGE_BITS)) {
+ /*
+ * The whole address space of the executable was reserved
+ * at the start, therefore all pages will be VALID.
+ * But assuming there are no PROT_NONE PT_LOAD segments,
+ * a PROT_NONE page means no data, all zero, and we can
+ * simply extend the new anon mapping back to the start
+ * of the page of bss.
+ */
align_bss -= TARGET_PAGE_SIZE;
- } else if (flags & PAGE_WRITE) {
- /* The page is already mapped writable. */
- memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
} else {
- /* Read-only zeros? */
- g_assert_not_reached();
+ /*
+ * The start of the bss shares a page with something.
+ * The only thing that we expect is the data section,
+ * which would already be marked writable.
+ * Overlapping the RX code segment seems malformed.
+ */
+ if (!(flags & PAGE_WRITE)) {
+ error_setg(errp, "PT_LOAD with bss overlapping "
+ "non-writable page");
+ return false;
+ }
+
+ /* The page is already mapped and writable. */
+ memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
}
}
- return align_bss >= end_bss ||
- target_mmap(align_bss, end_bss - align_bss, prot,
- MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) != -1;
+ if (align_bss < end_bss &&
+ target_mmap(align_bss, end_bss - align_bss, prot,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
+ error_setg_errno(errp, errno, "Error mapping bss");
+ return false;
+ }
+ return true;
}
#if defined(TARGET_ARM)
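The alignment arithmetic in zero_bss() is easiest to see with concrete numbers. A standalone illustration using a 4 KiB page as a stand-in for TARGET_PAGE_SIZE (addresses are made up):

#include <stdio.h>

#define PAGE_SIZE  0x1000u
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned start_bss = 0x40123456, end_bss = 0x40130000;
    unsigned align_bss = PAGE_ALIGN(start_bss);

    /* Bytes memset() in the partial page shared with the data section,
     * then the page range that gets a fresh anonymous mapping. */
    printf("memset %u bytes at 0x%x\n", align_bss - start_bss, start_bss);
    printf("mmap   [0x%x, 0x%x)\n", align_bss, PAGE_ALIGN(end_bss));
    return 0;
}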
@@ -3410,8 +3445,8 @@ static void load_elf_image(const char *image_name, int image_fd,
/* If the load segment requests extra zeros (e.g. bss), map it. */
if (vaddr_ef < vaddr_em &&
- !zero_bss(vaddr_ef, vaddr_em, elf_prot)) {
- goto exit_mmap;
+ !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) {
+ goto exit_errmsg;
}
/* Find the full program boundaries. */
diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c
index 8735e58bad..990b03e727 100644
--- a/linux-user/mips/cpu_loop.c
+++ b/linux-user/mips/cpu_loop.c
@@ -180,7 +180,9 @@ done_syscall:
}
force_sig_fault(TARGET_SIGFPE, si_code, env->active_tc.PC);
break;
-
+ case EXCP_OVERFLOW:
+ force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->active_tc.PC);
+ break;
/* The code below was inspired by the MIPS Linux kernel trap
* handling code in arch/mips/kernel/traps.c.
*/
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 8ccaab7859..7b44b9ff49 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -778,7 +778,7 @@ fail:
return -1;
}
-static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
+static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
abi_ulong real_start;
abi_ulong real_last;
@@ -807,7 +807,7 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
prot |= page_get_flags(a + 1);
}
if (prot != 0) {
- return;
+ return 0;
}
} else {
for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
@@ -825,7 +825,7 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
}
if (real_last < real_start) {
- return;
+ return 0;
}
}
@@ -836,32 +836,36 @@ static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
void *ptr = mmap(host_start, real_len, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS
| MAP_PRIVATE | MAP_NORESERVE, -1, 0);
- assert(ptr == host_start);
- } else {
- int ret = munmap(host_start, real_len);
- assert(ret == 0);
+ return ptr == host_start ? 0 : -1;
}
+ return munmap(host_start, real_len);
}
int target_munmap(abi_ulong start, abi_ulong len)
{
+ int ret;
+
trace_target_munmap(start, len);
if (start & ~TARGET_PAGE_MASK) {
- return -TARGET_EINVAL;
+ errno = EINVAL;
+ return -1;
}
len = TARGET_PAGE_ALIGN(len);
if (len == 0 || !guest_range_valid_untagged(start, len)) {
- return -TARGET_EINVAL;
+ errno = EINVAL;
+ return -1;
}
mmap_lock();
- mmap_reserve_or_unmap(start, len);
- page_set_flags(start, start + len - 1, 0);
- shm_region_rm_complete(start, start + len - 1);
+ ret = mmap_reserve_or_unmap(start, len);
+ if (likely(ret == 0)) {
+ page_set_flags(start, start + len - 1, 0);
+ shm_region_rm_complete(start, start + len - 1);
+ }
mmap_unlock();
- return 0;
+ return ret;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
index c4ba962708..c16c2c2d57 100644
--- a/linux-user/sh4/signal.c
+++ b/linux-user/sh4/signal.c
@@ -104,6 +104,14 @@ static void unwind_gusa(CPUSH4State *regs)
/* Reset the SP to the saved version in R1. */
regs->gregs[15] = regs->gregs[1];
+ } else if (regs->gregs[15] >= -128u && regs->pc == regs->gregs[0]) {
+ /* If we are on the last instruction of a gUSA region, we must reset
+ the SP, otherwise we would be pushing the signal context to
+ invalid memory. */
+ regs->gregs[15] = regs->gregs[1];
+ } else if (regs->flags & TB_FLAG_DELAY_SLOT) {
+ /* If we are in a delay slot, push the previous instruction. */
+ regs->pc -= 2;
}
}
diff --git a/linux-user/signal.c b/linux-user/signal.c
index a67ab47d30..3b8efec89f 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -32,6 +32,7 @@
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"
+#include "tcg/tcg.h"
static struct target_sigaction sigact_table[TARGET_NSIG];
@@ -43,9 +44,8 @@ abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;
/*
- * System includes define _NSIG as SIGRTMAX + 1,
- * but qemu (like the kernel) defines TARGET_NSIG as TARGET_SIGRTMAX
- * and the first signal is SIGHUP defined as 1
+ * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
+ * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
* Signal number 0 is reserved for use as kill(pid, 0), to test whether
* a process exists without sending it a signal.
*/
@@ -56,7 +56,6 @@ static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig) [sig] = TARGET_##sig,
MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
- /* next signals stay the same */
};
static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
@@ -64,18 +63,24 @@ static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
- if (sig < 1 || sig >= _NSIG) {
+ if (sig < 1) {
return sig;
}
+ if (sig >= _NSIG) {
+ return TARGET_NSIG + 1;
+ }
return host_to_target_signal_table[sig];
}
/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
- if (sig < 1 || sig > TARGET_NSIG) {
+ if (sig < 1) {
return sig;
}
+ if (sig > TARGET_NSIG) {
+ return _NSIG;
+ }
return target_to_host_signal_table[sig];
}
@@ -487,26 +492,6 @@ void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
-static int fatal_signal (int sig)
-{
- switch (sig) {
- case TARGET_SIGCHLD:
- case TARGET_SIGURG:
- case TARGET_SIGWINCH:
- /* Ignored by default. */
- return 0;
- case TARGET_SIGCONT:
- case TARGET_SIGSTOP:
- case TARGET_SIGTSTP:
- case TARGET_SIGTTIN:
- case TARGET_SIGTTOU:
- /* Job control signals. */
- return 0;
- default:
- return 1;
- }
-}
-
/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
@@ -526,57 +511,69 @@ static int core_dump_signal(int sig)
static void signal_table_init(void)
{
- int host_sig, target_sig, count;
+ int hsig, tsig, count;
/*
* Signals are supported starting from TARGET_SIGRTMIN and going up
- * until we run out of host realtime signals.
- * glibc at least uses only the lower 2 rt signals and probably
- * nobody's using the upper ones.
- * it's why SIGRTMIN (34) is generally greater than __SIGRTMIN (32)
- * To fix this properly we need to do manual signal delivery multiplexed
- * over a single host signal.
+ * until we run out of host realtime signals. Glibc uses the lower 2
+ * RT signals and (hopefully) nobody uses the upper ones.
+ * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
+ * To fix this properly we would need to do manual signal delivery
+ * multiplexed over a single host signal.
* Attempts to configure "missing" signals via sigaction will be
* silently ignored.
+ *
+ * Remap the target SIGABRT, so that we can distinguish host abort
+ * from guest abort. When the guest registers a signal handler or
+ * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
+ * arrives at dump_core_and_abort(), we will map back to host SIGABRT
+ * so that the parent (native or emulated) sees the correct signal.
+ * Finally, also map host to guest SIGABRT so that the emulated
+ * parent sees the correct mapping from wait status.
*/
- for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
- target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
- if (target_sig <= TARGET_NSIG) {
- host_to_target_signal_table[host_sig] = target_sig;
+
+ hsig = SIGRTMIN;
+ host_to_target_signal_table[SIGABRT] = 0;
+ host_to_target_signal_table[hsig++] = TARGET_SIGABRT;
+
+ for (; hsig <= SIGRTMAX; hsig++) {
+ tsig = hsig - SIGRTMIN + TARGET_SIGRTMIN;
+ if (tsig <= TARGET_NSIG) {
+ host_to_target_signal_table[hsig] = tsig;
}
}
- /* generate signal conversion tables */
- for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
- target_to_host_signal_table[target_sig] = _NSIG; /* poison */
- }
- for (host_sig = 1; host_sig < _NSIG; host_sig++) {
- if (host_to_target_signal_table[host_sig] == 0) {
- host_to_target_signal_table[host_sig] = host_sig;
- }
- target_sig = host_to_target_signal_table[host_sig];
- if (target_sig <= TARGET_NSIG) {
- target_to_host_signal_table[target_sig] = host_sig;
+ /* Invert the mapping that has already been assigned. */
+ for (hsig = 1; hsig < _NSIG; hsig++) {
+ tsig = host_to_target_signal_table[hsig];
+ if (tsig) {
+ assert(target_to_host_signal_table[tsig] == 0);
+ target_to_host_signal_table[tsig] = hsig;
}
}
- if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
- for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
- if (target_to_host_signal_table[target_sig] == _NSIG) {
- count++;
- }
+ host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;
+
+ /* Map everything else out-of-bounds. */
+ for (hsig = 1; hsig < _NSIG; hsig++) {
+ if (host_to_target_signal_table[hsig] == 0) {
+ host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
}
- trace_signal_table_init(count);
}
+ for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
+ if (target_to_host_signal_table[tsig] == 0) {
+ target_to_host_signal_table[tsig] = _NSIG;
+ count++;
+ }
+ }
+
+ trace_signal_table_init(count);
}
void signal_init(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
- struct sigaction act;
- struct sigaction oact;
- int i;
- int host_sig;
+ struct sigaction act, oact;
/* initialize signal conversion tables */
signal_table_init();
@@ -587,22 +584,36 @@ void signal_init(void)
sigfillset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
act.sa_sigaction = host_signal_handler;
- for(i = 1; i <= TARGET_NSIG; i++) {
- host_sig = target_to_host_signal(i);
- sigaction(host_sig, NULL, &oact);
- if (oact.sa_sigaction == (void *)SIG_IGN) {
- sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
- } else if (oact.sa_sigaction == (void *)SIG_DFL) {
- sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
+
+ /*
+ * A parent process may configure ignored signals, but all other
+ * signals are default. For any target signals that have no host
+ * mapping, set to ignore. For all core_dump_signal, install our
+ * host signal handler so that we may invoke dump_core_and_abort.
+ * This includes SIGSEGV and SIGBUS, which also need our signal
+ * handler for paging and exceptions.
+ */
+ for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
+ int hsig = target_to_host_signal(tsig);
+ abi_ptr thand = TARGET_SIG_IGN;
+
+ if (hsig >= _NSIG) {
+ continue;
}
- /* If there's already a handler installed then something has
- gone horribly wrong, so don't even try to handle that case. */
- /* Install some handlers for our own use. We need at least
- SIGSEGV and SIGBUS, to detect exceptions. We can not just
- trap all signals because it affects syscall interrupt
- behavior. But do trap all default-fatal signals. */
- if (fatal_signal (i))
- sigaction(host_sig, &act, NULL);
+
+ /* Because we force remap SIGABRT, we cannot probe and install in one step. */
+ if (tsig == TARGET_SIGABRT) {
+ sigaction(SIGABRT, NULL, &oact);
+ sigaction(hsig, &act, NULL);
+ } else {
+ struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
+ sigaction(hsig, iact, &oact);
+ }
+
+ if (oact.sa_sigaction != (void *)SIG_IGN) {
+ thand = TARGET_SIG_DFL;
+ }
+ sigact_table[tsig - 1]._sa_handler = thand;
}
}
@@ -690,14 +701,45 @@ void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
/* abort execution with signal */
static G_NORETURN
+void die_with_signal(int host_sig)
+{
+ struct sigaction act = {
+ .sa_handler = SIG_DFL,
+ };
+
+ /*
+ * The proper exit code for dying from an uncaught signal is -<signal>.
+ * The kernel doesn't allow exit() or _exit() to pass a negative value.
+ * To get the proper exit code we need to actually die from an uncaught
+ * signal. Here the default signal handler is installed, we send
+ * the signal and we wait for it to arrive.
+ */
+ sigfillset(&act.sa_mask);
+ sigaction(host_sig, &act, NULL);
+
+ kill(getpid(), host_sig);
+
+ /* Make sure the signal isn't masked (reusing the mask inside of act). */
+ sigdelset(&act.sa_mask, host_sig);
+ sigsuspend(&act.sa_mask);
+
+ /* unreachable */
+ _exit(EXIT_FAILURE);
+}
+
+static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
CPUState *cpu = env_cpu(env);
TaskState *ts = (TaskState *)cpu->opaque;
int host_sig, core_dumped = 0;
- struct sigaction act;
- host_sig = target_to_host_signal(target_sig);
+ /* On exit, undo the remapping of SIGABRT. */
+ if (target_sig == TARGET_SIGABRT) {
+ host_sig = SIGABRT;
+ } else {
+ host_sig = target_to_host_signal(target_sig);
+ }
trace_user_dump_core_and_abort(env, target_sig, host_sig);
gdb_signalled(env, target_sig);
@@ -719,29 +761,7 @@ void dump_core_and_abort(CPUArchState *env, int target_sig)
}
preexit_cleanup(env, 128 + target_sig);
-
- /* The proper exit code for dying from an uncaught signal is
- * -<signal>. The kernel doesn't allow exit() or _exit() to pass
- * a negative value. To get the proper exit code we need to
- * actually die from an uncaught signal. Here the default signal
- * handler is installed, we send ourself a signal and we wait for
- * it to arrive. */
- sigfillset(&act.sa_mask);
- act.sa_handler = SIG_DFL;
- act.sa_flags = 0;
- sigaction(host_sig, &act, NULL);
-
- /* For some reason raise(host_sig) doesn't send the signal when
- * statically linked on x86-64. */
- kill(getpid(), host_sig);
-
- /* Make sure the signal isn't masked (just reuse the mask inside
- of act) */
- sigdelset(&act.sa_mask, host_sig);
- sigsuspend(&act.sa_mask);
-
- /* unreachable */
- abort();
+ die_with_signal(host_sig);
}
/* queue a signal so that it will be sent to the virtual CPU as soon
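The die_with_signal() trick generalizes: the only way for a process to report "killed by signal N" in its wait status is to actually die from signal N with the default handler installed. A self-contained demonstration (using SIGTERM rather than a guest signal):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();

    if (pid == 0) {
        /* Child: install the default handler, send the signal, wait. */
        struct sigaction act = { .sa_handler = SIG_DFL };

        sigfillset(&act.sa_mask);
        sigaction(SIGTERM, &act, NULL);
        kill(getpid(), SIGTERM);

        /* Mirror the QEMU pattern in case the signal were blocked. */
        sigdelset(&act.sa_mask, SIGTERM);
        sigsuspend(&act.sa_mask);
        _exit(EXIT_FAILURE);        /* unreachable */
    }

    int status;
    waitpid(pid, &status, 0);
    if (WIFSIGNALED(status)) {
        printf("child killed by signal %d\n", WTERMSIG(status));
    }
    return 0;
}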
@@ -775,6 +795,161 @@ static inline void rewind_if_in_safe_syscall(void *puc)
}
}
+static G_NORETURN
+void die_from_signal(siginfo_t *info)
+{
+ char sigbuf[4], codebuf[12];
+ const char *sig, *code = NULL;
+
+ switch (info->si_signo) {
+ case SIGSEGV:
+ sig = "SEGV";
+ switch (info->si_code) {
+ case SEGV_MAPERR:
+ code = "MAPERR";
+ break;
+ case SEGV_ACCERR:
+ code = "ACCERR";
+ break;
+ }
+ break;
+ case SIGBUS:
+ sig = "BUS";
+ switch (info->si_code) {
+ case BUS_ADRALN:
+ code = "ADRALN";
+ break;
+ case BUS_ADRERR:
+ code = "ADRERR";
+ break;
+ }
+ break;
+ case SIGILL:
+ sig = "ILL";
+ switch (info->si_code) {
+ case ILL_ILLOPC:
+ code = "ILLOPC";
+ break;
+ case ILL_ILLOPN:
+ code = "ILLOPN";
+ break;
+ case ILL_ILLADR:
+ code = "ILLADR";
+ break;
+ case ILL_PRVOPC:
+ code = "PRVOPC";
+ break;
+ case ILL_PRVREG:
+ code = "PRVREG";
+ break;
+ case ILL_COPROC:
+ code = "COPROC";
+ break;
+ }
+ break;
+ case SIGFPE:
+ sig = "FPE";
+ switch (info->si_code) {
+ case FPE_INTDIV:
+ code = "INTDIV";
+ break;
+ case FPE_INTOVF:
+ code = "INTOVF";
+ break;
+ }
+ break;
+ case SIGTRAP:
+ sig = "TRAP";
+ break;
+ default:
+ snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
+ sig = sigbuf;
+ break;
+ }
+ if (code == NULL) {
+ snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
+ code = codebuf;
+ }
+
+ error_report("QEMU internal SIG%s {code=%s, addr=%p}",
+ sig, code, info->si_addr);
+ die_with_signal(info->si_signo);
+}
+
+static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
+ host_sigcontext *uc)
+{
+ uintptr_t host_addr = (uintptr_t)info->si_addr;
+ /*
+ * Convert forcefully to guest address space: addresses outside
+ * reserved_va are still valid to report via SEGV_MAPERR.
+ */
+ bool is_valid = h2g_valid(host_addr);
+ abi_ptr guest_addr = h2g_nocheck(host_addr);
+ uintptr_t pc = host_signal_pc(uc);
+ bool is_write = host_signal_write(info, uc);
+ MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
+ bool maperr;
+
+ /* If this was a write to a TB protected page, restart. */
+ if (is_write
+ && is_valid
+ && info->si_code == SEGV_ACCERR
+ && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
+ pc, guest_addr)) {
+ return;
+ }
+
+ /*
+ * If the access was not on behalf of the guest, within the executable
+ * mapping of the generated code buffer, then it is a host bug.
+ */
+ if (access_type != MMU_INST_FETCH
+ && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
+ die_from_signal(info);
+ }
+
+ maperr = true;
+ if (is_valid && info->si_code == SEGV_ACCERR) {
+ /*
+ * With reserved_va, the whole address space is PROT_NONE,
+ * which means that we may get ACCERR when we want MAPERR.
+ */
+ if (page_get_flags(guest_addr) & PAGE_VALID) {
+ maperr = false;
+ } else {
+ info->si_code = SEGV_MAPERR;
+ }
+ }
+
+ sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
+ cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
+}
+
+static void host_sigbus_handler(CPUState *cpu, siginfo_t *info,
+ host_sigcontext *uc)
+{
+ uintptr_t pc = host_signal_pc(uc);
+ bool is_write = host_signal_write(info, uc);
+ MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
+
+ /*
+ * If the access was not on behalf of the guest, within the executable
+ * mapping of the generated code buffer, then it is a host bug.
+ */
+ if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
+ die_from_signal(info);
+ }
+
+ if (info->si_code == BUS_ADRALN) {
+ uintptr_t host_addr = (uintptr_t)info->si_addr;
+ abi_ptr guest_addr = h2g_nocheck(host_addr);
+
+ sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
+ cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
+ }
+}
+
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
CPUState *cpu = thread_cpu;
@@ -786,61 +961,28 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
int guest_sig;
uintptr_t pc = 0;
bool sync_sig = false;
- void *sigmask = host_signal_mask(uc);
+ void *sigmask;
/*
* Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
- * handling wrt signal blocking and unwinding.
+ * handling wrt signal blocking and unwinding. Non-spoofed SIGILL,
+ * SIGFPE, SIGTRAP are always host bugs.
*/
- if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
- MMUAccessType access_type;
- uintptr_t host_addr;
- abi_ptr guest_addr;
- bool is_write;
-
- host_addr = (uintptr_t)info->si_addr;
-
- /*
- * Convert forcefully to guest address space: addresses outside
- * reserved_va are still valid to report via SEGV_MAPERR.
- */
- guest_addr = h2g_nocheck(host_addr);
-
- pc = host_signal_pc(uc);
- is_write = host_signal_write(info, uc);
- access_type = adjust_signal_pc(&pc, is_write);
-
- if (host_sig == SIGSEGV) {
- bool maperr = true;
-
- if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
- /* If this was a write to a TB protected page, restart. */
- if (is_write &&
- handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) {
- return;
- }
-
- /*
- * With reserved_va, the whole address space is PROT_NONE,
- * which means that we may get ACCERR when we want MAPERR.
- */
- if (page_get_flags(guest_addr) & PAGE_VALID) {
- maperr = false;
- } else {
- info->si_code = SEGV_MAPERR;
- }
- }
-
- sigprocmask(SIG_SETMASK, sigmask, NULL);
- cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
- } else {
- sigprocmask(SIG_SETMASK, sigmask, NULL);
- if (info->si_code == BUS_ADRALN) {
- cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
- }
+ if (info->si_code > 0) {
+ switch (host_sig) {
+ case SIGSEGV:
+ /* Only returns on handle_sigsegv_accerr_write success. */
+ host_sigsegv_handler(cpu, info, uc);
+ return;
+ case SIGBUS:
+ host_sigbus_handler(cpu, info, uc);
+ sync_sig = true;
+ break;
+ case SIGILL:
+ case SIGFPE:
+ case SIGTRAP:
+ die_from_signal(info);
}
-
- sync_sig = true;
}
/* get target signal number */
@@ -881,6 +1023,7 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
* would write 0xff bytes off the end of the structure and trash
* data on the struct.
*/
+ sigmask = host_signal_mask(uc);
memset(sigmask, 0xff, SIGSET_T_SIZE);
sigdelset(sigmask, SIGSEGV);
sigdelset(sigmask, SIGBUS);
@@ -936,7 +1079,6 @@ int do_sigaction(int sig, const struct target_sigaction *act,
struct target_sigaction *oact, abi_ulong ka_restorer)
{
struct target_sigaction *k;
- struct sigaction act1;
int host_sig;
int ret = 0;
@@ -996,22 +1138,27 @@ int do_sigaction(int sig, const struct target_sigaction *act,
return 0;
}
if (host_sig != SIGSEGV && host_sig != SIGBUS) {
+ struct sigaction act1;
+
sigfillset(&act1.sa_mask);
act1.sa_flags = SA_SIGINFO;
- if (k->sa_flags & TARGET_SA_RESTART)
- act1.sa_flags |= SA_RESTART;
- /* NOTE: it is important to update the host kernel signal
- ignore state to avoid getting unexpected interrupted
- syscalls */
if (k->_sa_handler == TARGET_SIG_IGN) {
+ /*
+ * It is important to update the host kernel signal ignore
+ * state to avoid getting unexpected interrupted syscalls.
+ */
act1.sa_sigaction = (void *)SIG_IGN;
} else if (k->_sa_handler == TARGET_SIG_DFL) {
- if (fatal_signal (sig))
+ if (core_dump_signal(sig)) {
act1.sa_sigaction = host_signal_handler;
- else
+ } else {
act1.sa_sigaction = (void *)SIG_DFL;
+ }
} else {
act1.sa_sigaction = host_signal_handler;
+ if (k->sa_flags & TARGET_SA_RESTART) {
+ act1.sa_flags |= SA_RESTART;
+ }
}
ret = sigaction(host_sig, &act1, NULL);
}
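For reference, a minimal standalone sketch of the host registration idiom the hunk above implements: SA_SIGINFO is always set, while SA_RESTART is honored only for guest-installed handlers. This is plain POSIX; guest_wants_restart stands in for the TARGET_SA_RESTART check.

    #include <signal.h>
    #include <stdbool.h>

    static void install_host_handler(int host_sig, bool guest_wants_restart,
                                     void (*handler)(int, siginfo_t *, void *))
    {
        struct sigaction act = { 0 };

        sigfillset(&act.sa_mask);   /* block everything while the handler runs */
        act.sa_flags = SA_SIGINFO;  /* always use the three-argument handler */
        act.sa_sigaction = handler;
        if (guest_wants_restart) {
            act.sa_flags |= SA_RESTART;
        }
        sigaction(host_sig, &act, NULL);
    }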
diff --git a/linux-user/sparc/target_syscall.h b/linux-user/sparc/target_syscall.h
index be77e44eb8..e421165357 100644
--- a/linux-user/sparc/target_syscall.h
+++ b/linux-user/sparc/target_syscall.h
@@ -50,11 +50,7 @@ static inline abi_ulong target_shmlba(CPUSPARCState *env)
#ifdef TARGET_SPARC64
return MAX(TARGET_PAGE_SIZE, 16 * 1024);
#else
- if (!(env->def.features & CPU_FEATURE_FLUSH)) {
- return 64 * 1024;
- } else {
- return 256 * 1024;
- }
+ return 256 * 1024;
#endif
}
diff --git a/meson.build b/meson.build
index 259dc5f308..dcef8b1e79 100644
--- a/meson.build
+++ b/meson.build
@@ -2135,6 +2135,7 @@ config_host_data.set('CONFIG_TPM', have_tpm)
config_host_data.set('CONFIG_TSAN', get_option('tsan'))
config_host_data.set('CONFIG_USB_LIBUSB', libusb.found())
config_host_data.set('CONFIG_VDE', vde.found())
+config_host_data.set('CONFIG_VHOST', have_vhost)
config_host_data.set('CONFIG_VHOST_NET', have_vhost_net)
config_host_data.set('CONFIG_VHOST_NET_USER', have_vhost_net_user)
config_host_data.set('CONFIG_VHOST_NET_VDPA', have_vhost_net_vdpa)
@@ -4087,8 +4088,10 @@ if 'cpp' in all_languages
else
summary_info += {'C++ compiler': false}
endif
-if targetos == 'darwin'
+if 'objc' in all_languages
summary_info += {'Objective-C compiler': ' '.join(meson.get_compiler('objc').cmd_array())}
+else
+ summary_info += {'Objective-C compiler': false}
endif
option_cflags = (get_option('debug') ? ['-g'] : [])
if get_option('optimization') != 'plain'
@@ -4098,7 +4101,7 @@ summary_info += {'CFLAGS': ' '.join(get_option('c_args') + option_cfl
if 'cpp' in all_languages
summary_info += {'CXXFLAGS': ' '.join(get_option('cpp_args') + option_cflags)}
endif
-if targetos == 'darwin'
+if 'objc' in all_languages
summary_info += {'OBJCFLAGS': ' '.join(get_option('objc_args') + option_cflags)}
endif
link_args = get_option('c_link_args')
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index d206700a43..a82597f18e 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -30,6 +30,7 @@
#include "sysemu/runstate.h"
#include "ui/qemu-spice.h"
#include "sysemu/sysemu.h"
+#include "options.h"
#include "migration.h"
static void migration_global_dump(Monitor *mon)
@@ -696,7 +697,6 @@ void hmp_x_colo_lost_heartbeat(Monitor *mon, const QDict *qdict)
typedef struct HMPMigrationStatus {
QEMUTimer *timer;
Monitor *mon;
- bool is_block_migration;
} HMPMigrationStatus;
static void hmp_migrate_status_cb(void *opaque)
@@ -722,7 +722,7 @@ static void hmp_migrate_status_cb(void *opaque)
timer_mod(status->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
} else {
- if (status->is_block_migration) {
+ if (migrate_block()) {
monitor_printf(status->mon, "\n");
}
if (info->error_desc) {
@@ -762,7 +762,6 @@ void hmp_migrate(Monitor *mon, const QDict *qdict)
status = g_malloc0(sizeof(*status));
status->mon = mon;
- status->is_block_migration = blk || inc;
status->timer = timer_new_ms(QEMU_CLOCK_REALTIME, hmp_migrate_status_cb,
status);
timer_mod(status->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
diff --git a/migration/migration.c b/migration/migration.c
index 6ba5e145ac..67547eb6a1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -447,6 +447,18 @@ static void qemu_start_incoming_migration(const char *uri, Error **errp)
socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
} else if (strstart(uri, "rdma:", &p)) {
+ if (migrate_compress()) {
+ error_setg(errp, "RDMA and compression can't be used together");
+ return;
+ }
+ if (migrate_xbzrle()) {
+ error_setg(errp, "RDMA and XBZRLE can't be used together");
+ return;
+ }
+ if (migrate_multifd()) {
+ error_setg(errp, "RDMA and multifd can't be used together");
+ return;
+ }
rdma_start_incoming_migration(p, errp);
#endif
} else if (strstart(uri, "exec:", &p)) {
@@ -962,16 +974,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
info->xbzrle_cache->overflow = xbzrle_counters.overflow;
}
- if (migrate_compress()) {
- info->compression = g_malloc0(sizeof(*info->compression));
- info->compression->pages = compression_counters.pages;
- info->compression->busy = compression_counters.busy;
- info->compression->busy_rate = compression_counters.busy_rate;
- info->compression->compressed_size =
- compression_counters.compressed_size;
- info->compression->compression_rate =
- compression_counters.compression_rate;
- }
+ populate_compress(info);
if (cpu_throttle_active()) {
info->has_cpu_throttle_percentage = true;
@@ -1204,7 +1207,7 @@ static void migrate_fd_cleanup(MigrationState *s)
/* It is used on info migrate. We can't free it */
error_report_err(error_copy(s->error));
}
- notifier_list_notify(&migration_state_notifiers, s);
+ migration_call_notifiers(s);
block_cleanup_parameters();
yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}
@@ -1308,14 +1311,24 @@ static void migrate_fd_cancel(MigrationState *s)
}
}
-void add_migration_state_change_notifier(Notifier *notify)
+void migration_add_notifier(Notifier *notify,
+ void (*func)(Notifier *notifier, void *data))
{
+ notify->notify = func;
notifier_list_add(&migration_state_notifiers, notify);
}
-void remove_migration_state_change_notifier(Notifier *notify)
+void migration_remove_notifier(Notifier *notify)
{
- notifier_remove(notify);
+ if (notify->notify) {
+ notifier_remove(notify);
+ notify->notify = NULL;
+ }
+}
+
+void migration_call_notifiers(MigrationState *s)
+{
+ notifier_list_notify(&migration_state_notifiers, s);
}
bool migration_in_setup(MigrationState *s)
@@ -1454,45 +1467,49 @@ int migrate_init(MigrationState *s, Error **errp)
s->switchover_acked = false;
s->rdma_migration = false;
/*
- * set mig_stats compression_counters memory to zero for a
- * new migration
+ * set mig_stats memory to zero for a new migration
*/
memset(&mig_stats, 0, sizeof(mig_stats));
- memset(&compression_counters, 0, sizeof(compression_counters));
migration_reset_vfio_bytes_transferred();
return 0;
}
-int migrate_add_blocker_internal(Error *reason, Error **errp)
+int migrate_add_blocker_internal(Error **reasonp, Error **errp)
{
/* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
- error_propagate_prepend(errp, error_copy(reason),
+ error_propagate_prepend(errp, *reasonp,
"disallowing migration blocker "
"(migration/snapshot in progress) for: ");
+ *reasonp = NULL;
return -EBUSY;
}
- migration_blockers = g_slist_prepend(migration_blockers, reason);
+ migration_blockers = g_slist_prepend(migration_blockers, *reasonp);
return 0;
}
-int migrate_add_blocker(Error *reason, Error **errp)
+int migrate_add_blocker(Error **reasonp, Error **errp)
{
if (only_migratable) {
- error_propagate_prepend(errp, error_copy(reason),
+ error_propagate_prepend(errp, *reasonp,
"disallowing migration blocker "
"(--only-migratable) for: ");
+ *reasonp = NULL;
return -EACCES;
}
- return migrate_add_blocker_internal(reason, errp);
+ return migrate_add_blocker_internal(reasonp, errp);
}
-void migrate_del_blocker(Error *reason)
+void migrate_del_blocker(Error **reasonp)
{
- migration_blockers = g_slist_remove(migration_blockers, reason);
+ if (*reasonp) {
+ migration_blockers = g_slist_remove(migration_blockers, *reasonp);
+ error_free(*reasonp);
+ *reasonp = NULL;
+ }
}
void qmp_migrate_incoming(const char *uri, Error **errp)
@@ -2226,7 +2243,7 @@ static int postcopy_start(MigrationState *ms, Error **errp)
* spice needs to trigger a transition now
*/
ms->postcopy_after_devices = true;
- notifier_list_notify(&migration_state_notifiers, ms);
+ migration_call_notifiers(ms);
ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
@@ -3306,7 +3323,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
rate_limit = migrate_max_bandwidth();
/* Notify before starting migration thread */
- notifier_list_notify(&migration_state_notifiers, s);
+ migration_call_notifiers(s);
}
migration_rate_set(rate_limit);
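A hedged usage sketch for the reworked notifier API above: the callback is now supplied at registration time instead of being pre-set on the Notifier, and removal is idempotent because the notify pointer doubles as a registered flag. All names below are illustrative.

    static Notifier my_notifier;

    static void my_migration_cb(Notifier *notifier, void *data)
    {
        MigrationState *s = data;
        /* react to the migration state change */
        (void)notifier;
        (void)s;
    }

    static void my_setup(void)
    {
        migration_add_notifier(&my_notifier, my_migration_cb);
    }

    static void my_teardown(void)
    {
        /* safe even if already removed: notify is NULLed on removal */
        migration_remove_notifier(&my_notifier);
    }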
diff --git a/migration/multifd.c b/migration/multifd.c
index 1fe53d3b98..e2a45c667a 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -743,9 +743,6 @@ static void *multifd_send_thread(void *opaque)
if (flags & MULTIFD_FLAG_SYNC) {
qemu_sem_post(&p->sem_sync);
}
- } else if (p->quit) {
- qemu_mutex_unlock(&p->mutex);
- break;
} else {
qemu_mutex_unlock(&p->mutex);
/* sometimes there are spurious wakeups */
diff --git a/migration/ram-compress.c b/migration/ram-compress.c
index 06254d8c69..d037dfe6cf 100644
--- a/migration/ram-compress.c
+++ b/migration/ram-compress.c
@@ -32,11 +32,14 @@
#include "ram-compress.h"
#include "qemu/error-report.h"
+#include "qemu/stats64.h"
#include "migration.h"
#include "options.h"
#include "io/channel-null.h"
#include "exec/target_page.h"
#include "exec/ramblock.h"
+#include "ram.h"
+#include "migration-stats.h"
CompressionStats compression_counters;
@@ -227,27 +230,25 @@ static inline void compress_reset_result(CompressParam *param)
void flush_compressed_data(int (send_queued_data(CompressParam *)))
{
- int idx, thread_count;
-
- thread_count = migrate_compress_threads();
+ int thread_count = migrate_compress_threads();
qemu_mutex_lock(&comp_done_lock);
- for (idx = 0; idx < thread_count; idx++) {
- while (!comp_param[idx].done) {
+ for (int i = 0; i < thread_count; i++) {
+ while (!comp_param[i].done) {
qemu_cond_wait(&comp_done_cond, &comp_done_lock);
}
}
qemu_mutex_unlock(&comp_done_lock);
- for (idx = 0; idx < thread_count; idx++) {
- qemu_mutex_lock(&comp_param[idx].mutex);
- if (!comp_param[idx].quit) {
- CompressParam *param = &comp_param[idx];
+ for (int i = 0; i < thread_count; i++) {
+ qemu_mutex_lock(&comp_param[i].mutex);
+ if (!comp_param[i].quit) {
+ CompressParam *param = &comp_param[i];
send_queued_data(param);
assert(qemu_file_buffer_empty(param->file));
compress_reset_result(param);
}
- qemu_mutex_unlock(&comp_param[idx].mutex);
+ qemu_mutex_unlock(&comp_param[i].mutex);
}
}
@@ -262,15 +263,15 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block,
int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset,
int (send_queued_data(CompressParam *)))
{
- int idx, thread_count, pages = -1;
+ int thread_count, pages = -1;
bool wait = migrate_compress_wait_thread();
thread_count = migrate_compress_threads();
qemu_mutex_lock(&comp_done_lock);
retry:
- for (idx = 0; idx < thread_count; idx++) {
- if (comp_param[idx].done) {
- CompressParam *param = &comp_param[idx];
+ for (int i = 0; i < thread_count; i++) {
+ if (comp_param[i].done) {
+ CompressParam *param = &comp_param[i];
qemu_mutex_lock(&param->mutex);
param->done = false;
send_queued_data(param);
@@ -364,16 +365,14 @@ static void *do_data_decompress(void *opaque)
int wait_for_decompress_done(void)
{
- int idx, thread_count;
-
if (!migrate_compress()) {
return 0;
}
- thread_count = migrate_decompress_threads();
+ int thread_count = migrate_decompress_threads();
qemu_mutex_lock(&decomp_done_lock);
- for (idx = 0; idx < thread_count; idx++) {
- while (!decomp_param[idx].done) {
+ for (int i = 0; i < thread_count; i++) {
+ while (!decomp_param[i].done) {
qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
}
}
@@ -430,6 +429,11 @@ int compress_threads_load_setup(QEMUFile *f)
return 0;
}
+ /*
+ * set compression_counters memory to zero for a new migration
+ */
+ memset(&compression_counters, 0, sizeof(compression_counters));
+
thread_count = migrate_decompress_threads();
decompress_threads = g_new0(QemuThread, thread_count);
decomp_param = g_new0(DecompressParam, thread_count);
@@ -459,27 +463,54 @@ exit:
void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len)
{
- int idx, thread_count;
-
- thread_count = migrate_decompress_threads();
+ int thread_count = migrate_decompress_threads();
QEMU_LOCK_GUARD(&decomp_done_lock);
while (true) {
- for (idx = 0; idx < thread_count; idx++) {
- if (decomp_param[idx].done) {
- decomp_param[idx].done = false;
- qemu_mutex_lock(&decomp_param[idx].mutex);
- qemu_get_buffer(f, decomp_param[idx].compbuf, len);
- decomp_param[idx].des = host;
- decomp_param[idx].len = len;
- qemu_cond_signal(&decomp_param[idx].cond);
- qemu_mutex_unlock(&decomp_param[idx].mutex);
- break;
+ for (int i = 0; i < thread_count; i++) {
+ if (decomp_param[i].done) {
+ decomp_param[i].done = false;
+ qemu_mutex_lock(&decomp_param[i].mutex);
+ qemu_get_buffer(f, decomp_param[i].compbuf, len);
+ decomp_param[i].des = host;
+ decomp_param[i].len = len;
+ qemu_cond_signal(&decomp_param[i].cond);
+ qemu_mutex_unlock(&decomp_param[i].mutex);
+ return;
}
}
- if (idx < thread_count) {
- break;
- } else {
- qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
- }
+ qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
+ }
+}
+
+void populate_compress(MigrationInfo *info)
+{
+ if (!migrate_compress()) {
+ return;
+ }
+ info->compression = g_malloc0(sizeof(*info->compression));
+ info->compression->pages = compression_counters.pages;
+ info->compression->busy = compression_counters.busy;
+ info->compression->busy_rate = compression_counters.busy_rate;
+ info->compression->compressed_size = compression_counters.compressed_size;
+ info->compression->compression_rate = compression_counters.compression_rate;
+}
+
+uint64_t ram_compressed_pages(void)
+{
+ return compression_counters.pages;
+}
+
+void update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
+{
+ ram_transferred_add(bytes_xmit);
+
+ if (param->result == RES_ZEROPAGE) {
+ stat64_add(&mig_stats.zero_pages, 1);
+ return;
}
+
+ /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
+ compression_counters.compressed_size += bytes_xmit - 8;
+ compression_counters.pages++;
}
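A hedged sketch of the send-side accounting now centralized in update_compress_thread_counts(): once a worker's buffered output has been flushed to the migration stream, the caller credits the bytes put on the wire. The flush helper used here is an assumption about the caller's shape, not part of this patch.

    static int send_one_compressed_page(QEMUFile *wire, CompressParam *param)
    {
        /* assumed flush step: copy the worker's buffered output to the wire */
        int bytes_xmit = qemu_put_qemu_file(wire, param->file);

        update_compress_thread_counts(param, bytes_xmit);
        return bytes_xmit;
    }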
diff --git a/migration/ram-compress.h b/migration/ram-compress.h
index 6f7fe2f472..e55d3b50bd 100644
--- a/migration/ram-compress.h
+++ b/migration/ram-compress.h
@@ -30,6 +30,7 @@
#define QEMU_MIGRATION_COMPRESS_H
#include "qemu-file.h"
+#include "qapi/qapi-types-migration.h"
enum CompressResult {
RES_NONE = 0,
@@ -67,4 +68,8 @@ void compress_threads_load_cleanup(void);
int compress_threads_load_setup(QEMUFile *f);
void decompress_data_with_multi_threads(QEMUFile *f, void *host, int len);
+void populate_compress(MigrationInfo *info);
+uint64_t ram_compressed_pages(void);
+void update_compress_thread_counts(const CompressParam *param, int bytes_xmit);
+
#endif
diff --git a/migration/ram.c b/migration/ram.c
index c844151ee9..92769902bb 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -939,7 +939,7 @@ uint64_t ram_get_total_transferred_pages(void)
{
return stat64_get(&mig_stats.normal_pages) +
stat64_get(&mig_stats.zero_pages) +
- compression_counters.pages + xbzrle_counters.pages;
+ ram_compressed_pages() + xbzrle_counters.pages;
}
static void migration_update_rates(RAMState *rs, int64_t end_time)
@@ -1144,13 +1144,12 @@ void ram_release_page(const char *rbname, uint64_t offset)
*
* @rs: current RAM state
* @pss: current PSS channel
- * @block: block that contains the page we want to send
* @offset: offset inside the block for the page
*/
-static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
+static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
ram_addr_t offset)
{
- uint8_t *p = block->host + offset;
+ uint8_t *p = pss->block->host + offset;
QEMUFile *file = pss->pss_channel;
int len = 0;
@@ -1158,10 +1157,10 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
return 0;
}
- len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
+ len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(file, 0);
len += 1;
- ram_release_page(block->idstr, offset);
+ ram_release_page(pss->block->idstr, offset);
stat64_add(&mig_stats.zero_pages, 1);
ram_transferred_add(len);
@@ -1172,7 +1171,7 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
*/
if (rs->xbzrle_started) {
XBZRLE_cache_lock();
- xbzrle_cache_zero_page(block->offset + offset);
+ xbzrle_cache_zero_page(pss->block->offset + offset);
XBZRLE_cache_unlock();
}
@@ -1186,12 +1185,12 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
*
* Returns true if the page has been saved, otherwise false.
*/
-static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
+static bool control_save_page(PageSearchStatus *pss,
ram_addr_t offset, int *pages)
{
int ret;
- ret = rdma_control_save_page(pss->pss_channel, block->offset, offset,
+ ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
TARGET_PAGE_SIZE);
if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
return false;
@@ -1292,21 +1291,6 @@ static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
return 1;
}
-static void
-update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
-{
- ram_transferred_add(bytes_xmit);
-
- if (param->result == RES_ZEROPAGE) {
- stat64_add(&mig_stats.zero_pages, 1);
- return;
- }
-
- /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
- compression_counters.compressed_size += bytes_xmit - 8;
- compression_counters.pages++;
-}
-
static bool save_page_use_compression(RAMState *rs);
static int send_queued_data(CompressParam *param)
@@ -2082,7 +2066,7 @@ static bool save_page_use_compression(RAMState *rs)
* paths to handle it
*/
static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
- RAMBlock *block, ram_addr_t offset)
+ ram_addr_t offset)
{
if (!save_page_use_compression(rs)) {
return false;
@@ -2098,12 +2082,13 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
* We post the first page as a normal page, as compression will take
* much CPU resource.
*/
- if (block != pss->last_sent_block) {
+ if (pss->block != pss->last_sent_block) {
ram_flush_compressed_data(rs);
return false;
}
- if (compress_page_with_multi_thread(block, offset, send_queued_data) > 0) {
+ if (compress_page_with_multi_thread(pss->block, offset,
+ send_queued_data) > 0) {
return true;
}
@@ -2125,15 +2110,15 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
int res;
- if (control_save_page(pss, block, offset, &res)) {
+ if (control_save_page(pss, offset, &res)) {
return res;
}
- if (save_compress_page(rs, pss, block, offset)) {
+ if (save_compress_page(rs, pss, offset)) {
return 1;
}
- if (save_zero_page(rs, pss, block, offset)) {
+ if (save_zero_page(rs, pss, offset)) {
return 1;
}
@@ -3888,6 +3873,7 @@ static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
ret = qemu_ram_resize(block, length, &local_err);
if (local_err) {
error_report_err(local_err);
+ return ret;
}
}
/* For postcopy we need to check hugepage sizes match */
@@ -3898,7 +3884,7 @@ static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
error_report("Mismatched RAM page size %s "
"(local) %zd != %" PRId64, block->idstr,
block->page_size, remote_page_size);
- ret = -EINVAL;
+ return -EINVAL;
}
}
if (migrate_ignore_shared()) {
@@ -3908,7 +3894,7 @@ static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
error_report("Mismatched GPAs for block %s "
"%" PRId64 "!= %" PRId64, block->idstr,
(uint64_t)addr, (uint64_t)block->mr->addr);
- ret = -EINVAL;
+ return -EINVAL;
}
}
ret = rdma_block_notification_handle(f, block->idstr);
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 939c984d5b..7a226c93bc 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -339,7 +339,8 @@ static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
struct vhost_vdpa *v = &s->vhost_vdpa;
- add_migration_state_change_notifier(&s->migration_state);
+ migration_add_notifier(&s->migration_state,
+ vdpa_net_migration_state_notifier);
if (v->shadow_vqs_enabled) {
v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
v->iova_range.last);
@@ -399,7 +400,7 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc)
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
if (s->vhost_vdpa.index == 0) {
- remove_migration_state_change_notifier(&s->migration_state);
+ migration_remove_notifier(&s->migration_state);
}
dev = s->vhost_vdpa.dev;
@@ -618,39 +619,77 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
vhost_vdpa_net_client_stop(nc);
}
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
- size_t in_len)
+static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
+ const struct iovec *out_sg, size_t out_num,
+ const struct iovec *in_sg, size_t in_num)
{
- /* Buffers for the device */
- const struct iovec out = {
- .iov_base = s->cvq_cmd_out_buffer,
- .iov_len = out_len,
- };
- const struct iovec in = {
- .iov_base = s->status,
- .iov_len = sizeof(virtio_net_ctrl_ack),
- };
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
- r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
+ r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
__func__);
}
- return r;
}
- /*
- * We can poll here since we've had BQL from the time we sent the
- * descriptor. Also, we need to take the answer before SVQ pulls by itself,
- * when BQL is released
- */
- return vhost_svq_poll(svq, 1);
+ return r;
+}
+
+/*
+ * Convenience wrapper to poll SVQ for multiple control commands.
+ *
+ * Caller should hold the BQL when invoking this function, and must take
+ * the answer before SVQ polls by itself once the BQL is released.
+ */
+static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
+{
+ VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
+ return vhost_svq_poll(svq, cmds_in_flight);
+}
+
+static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
+{
+ /* reset the cursor of the output buffer for the device */
+ out_cursor->iov_base = s->cvq_cmd_out_buffer;
+ out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
+
+ /* reset the cursor of the in buffer for the device */
+ in_cursor->iov_base = s->status;
+ in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
+}
+
+/*
+ * Poll SVQ for multiple pending control commands and check the device's ack.
+ *
+ * Caller should hold the BQL when invoking this function.
+ *
+ * @s: The VhostVDPAState
+ * @len: The length of the pending status shadow buffer
+ */
+static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
+{
+ /* device uses a one-byte length ack for each control command */
+ ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
+ if (unlikely(dev_written != len)) {
+ return -EIO;
+ }
+
+ /* check the device's ack */
+ for (int i = 0; i < len; ++i) {
+ if (s->status[i] != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+ }
+ return 0;
}
-static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
+static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor, uint8_t class,
uint8_t cmd, const struct iovec *data_sg,
size_t data_num)
{
@@ -658,36 +697,72 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.class = class,
.cmd = cmd,
};
- size_t data_size = iov_size(data_sg, data_num);
+ size_t data_size = iov_size(data_sg, data_num), cmd_size;
+ struct iovec out, in;
+ ssize_t r;
+ unsigned dummy_cursor_iov_cnt;
+ VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
+ cmd_size = sizeof(ctrl) + data_size;
+ if (vhost_svq_available_slots(svq) < 2 ||
+ iov_size(out_cursor, 1) < cmd_size) {
+ /*
+ * It is time to flush all pending control commands if SVQ is full
+ * or control commands shadow buffers are full.
+ *
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
+ (void *)s->status);
+ if (unlikely(r < 0)) {
+ return r;
+ }
- /* pack the CVQ command header */
- memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
+ vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
+ }
+ /* pack the CVQ command header */
+ iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
/* pack the CVQ command command-specific-data */
iov_to_buf(data_sg, data_num, 0,
- s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
+ out_cursor->iov_base + sizeof(ctrl), data_size);
+
+ /* extract the required buffer from the cursor for output */
+ iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
+ /* extract the required buffer from the cursor for input */
+ iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
+
+ r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ /* iterate the cursors */
+ dummy_cursor_iov_cnt = 1;
+ iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
+ dummy_cursor_iov_cnt = 1;
+ iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
- return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
- sizeof(virtio_net_ctrl_ack));
+ return 0;
}
-static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
+static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
const struct iovec data = {
.iov_base = (void *)n->mac,
.iov_len = sizeof(n->mac),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_ADDR_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -732,25 +807,24 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
.iov_len = mul_macs_size,
},
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
- VIRTIO_NET_CTRL_MAC,
- VIRTIO_NET_CTRL_MAC_TABLE_SET,
- data, ARRAY_SIZE(data));
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_TABLE_SET,
+ data, ARRAY_SIZE(data));
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
}
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
struct virtio_net_ctrl_mq mq;
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
return 0;
@@ -761,24 +835,24 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
.iov_base = &mq,
.iov_len = sizeof(mq),
};
- dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
- VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_MQ,
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
uint64_t offloads;
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -806,20 +880,20 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
.iov_base = &offloads,
.iov_len = sizeof(offloads),
};
- dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
- VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
}
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor,
uint8_t cmd,
uint8_t on)
{
@@ -827,14 +901,23 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
.iov_base = &on,
.iov_len = sizeof(on),
};
- return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
- cmd, &data, 1);
+ ssize_t r;
+
+ r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX, cmd, &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ return 0;
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
return 0;
@@ -859,13 +942,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (!n->mac_table.uni_overflow && !n->promisc) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_PROMISC, 0);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_PROMISC, 0);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -887,13 +967,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->mac_table.multi_overflow || n->allmulti) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
@@ -912,13 +989,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->alluni) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_ALLUNI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -933,13 +1007,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nomulti) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOMULTI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -954,13 +1025,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nouni) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOUNI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_NOUNI, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -975,13 +1043,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nobcast) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOBCAST, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+ if (r < 0) {
+ return r;
}
}
@@ -990,27 +1055,29 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor,
uint16_t vid)
{
const struct iovec data = {
.iov_base = &vid,
.iov_len = sizeof(vid),
};
- ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
- VIRTIO_NET_CTRL_VLAN_ADD,
- &data, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (unlikely(*s->status != VIRTIO_NET_OK)) {
- return -EIO;
+ ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
+ VIRTIO_NET_CTRL_VLAN,
+ VIRTIO_NET_CTRL_VLAN_ADD,
+ &data, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
return 0;
}
static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
- const VirtIONet *n)
+ const VirtIONet *n,
+ struct iovec *out_cursor,
+ struct iovec *in_cursor)
{
int r;
@@ -1021,7 +1088,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
for (int i = 0; i < MAX_VLAN >> 5; i++) {
for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
if (n->vlans[i] & (1U << j)) {
- r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
+ r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
+ in_cursor, (i << 5) + j);
if (unlikely(r != 0)) {
return r;
}
@@ -1038,6 +1106,7 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
struct vhost_vdpa *v = &s->vhost_vdpa;
const VirtIONet *n;
int r;
+ struct iovec out_cursor, in_cursor;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@@ -1045,23 +1114,35 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
if (v->shadow_vqs_enabled) {
n = VIRTIO_NET(v->dev->vdev);
- r = vhost_vdpa_net_load_mac(s, n);
+ vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
+ r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
if (unlikely(r < 0)) {
return r;
}
- r = vhost_vdpa_net_load_mq(s, n);
+ r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
- r = vhost_vdpa_net_load_offloads(s, n);
+ r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
- r = vhost_vdpa_net_load_rx(s, n);
+ r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
- r = vhost_vdpa_net_load_vlan(s, n);
+ r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
+ if (unlikely(r)) {
+ return r;
+ }
+
+ /*
+ * We need to poll and check all of the device's pending used buffers.
+ *
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
if (unlikely(r)) {
return r;
}
@@ -1114,12 +1195,14 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
*/
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
VirtQueueElement *elem,
- struct iovec *out)
+ struct iovec *out,
+ const struct iovec *in)
{
struct virtio_net_ctrl_mac mac_data, *mac_ptr;
struct virtio_net_ctrl_hdr *hdr_ptr;
uint32_t cursor;
ssize_t r;
+ uint8_t on = 1;
/* parse the non-multicast MAC address entries from CVQ command */
cursor = sizeof(*hdr_ptr);
@@ -1167,10 +1250,25 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
* filter table to the vdpa device, it should send the
* VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
*/
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
+ hdr_ptr = out->iov_base;
+ out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
+
+ hdr_ptr->class = VIRTIO_NET_CTRL_RX;
+ hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
+ r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
if (unlikely(r < 0)) {
return r;
}
+
+ /*
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ r = vhost_vdpa_net_svq_poll(s, 1);
+ if (unlikely(r < sizeof(*s->status))) {
+ return r;
+ }
if (*s->status != VIRTIO_NET_OK) {
return sizeof(*s->status);
}
@@ -1248,10 +1346,15 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
.iov_base = s->cvq_cmd_out_buffer,
};
/* in buffer used for device model */
- const struct iovec in = {
+ const struct iovec model_in = {
.iov_base = &status,
.iov_len = sizeof(status),
};
+ /* in buffer used for vdpa device */
+ const struct iovec vdpa_in = {
+ .iov_base = s->status,
+ .iov_len = sizeof(*s->status),
+ };
ssize_t dev_written = -EINVAL;
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
@@ -1280,15 +1383,23 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
* the CVQ command directly.
*/
dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
- &out);
+ &out, &vdpa_in);
if (unlikely(dev_written < 0)) {
goto out;
}
} else {
- dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
- if (unlikely(dev_written < 0)) {
+ ssize_t r;
+ r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
+ if (unlikely(r < 0)) {
+ dev_written = r;
goto out;
}
+
+ /*
+ * We can poll here since we've had BQL from the time
+ * we sent the descriptor.
+ */
+ dev_written = vhost_vdpa_net_svq_poll(s, 1);
}
if (unlikely(dev_written < sizeof(status))) {
@@ -1301,7 +1412,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
}
status = VIRTIO_NET_ERR;
- virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
+ virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
if (status != VIRTIO_NET_OK) {
error_report("Bad CVQ processing in model");
}
@@ -1456,7 +1567,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->vhost_vdpa.device_fd = vdpa_device_fd;
s->vhost_vdpa.index = queue_pair_index;
s->always_svq = svq;
- s->migration_state.notify = vdpa_net_migration_state_notifier;
+ s->migration_state.notify = NULL;
s->vhost_vdpa.shadow_vqs_enabled = svq;
s->vhost_vdpa.iova_range = iova_range;
s->vhost_vdpa.shadow_data = svq;
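A hedged sketch of the cursor-based batching these hunks introduce: reset the cursors once, queue commands (each call advances both cursors), then flush and verify every one-byte ack in a single poll. The MQ command and n_pairs are illustrative.

    struct virtio_net_ctrl_mq mq = {
        .virtqueue_pairs = cpu_to_le16(n_pairs),
    };
    const struct iovec data = { .iov_base = &mq, .iov_len = sizeof(mq) };
    struct iovec out_cursor, in_cursor;
    ssize_t r;

    vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
    r = vhost_vdpa_net_load_cmd(s, &out_cursor, &in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    /* one status byte per queued command accumulates in s->status */
    return vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);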
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
index c7196143b1..e976c0cc93 100644
--- a/pc-bios/hppa-firmware.img
+++ b/pc-bios/hppa-firmware.img
Binary files differ
diff --git a/qapi/compat.json b/qapi/compat.json
index f4c19837eb..42034d9368 100644
--- a/qapi/compat.json
+++ b/qapi/compat.json
@@ -43,8 +43,8 @@
# This is intended for testing users of the management interfaces.
#
# Limitation: covers only syntactic aspects of QMP, i.e. stuff tagged
-# with feature 'deprecated'. We may want to extend it to cover
-# semantic aspects and CLI.
+# with feature 'deprecated' or 'unstable'. We may want to extend it
+# to cover semantic aspects and CLI.
#
# Limitation: deprecated-output policy @hide is not implemented for
# enumeration values. They behave the same as with policy @accept.
diff --git a/qapi/machine-common.json b/qapi/machine-common.json
new file mode 100644
index 0000000000..fa6bd71d12
--- /dev/null
+++ b/qapi/machine-common.json
@@ -0,0 +1,21 @@
+# -*- Mode: Python -*-
+# vim: filetype=python
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+##
+# = S390 machine data types
+##
+
+##
+# @CpuS390Entitlement:
+#
+# An enumeration of CPU entitlements that can be assumed by a virtual
+# S390 CPU
+#
+# Since: 8.2
+##
+{ 'enum': 'CpuS390Entitlement',
+ 'prefix': 'S390_CPU_ENTITLEMENT',
+ 'data': [ 'auto', 'low', 'medium', 'high' ] }
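For orientation, the C enum the QAPI generator should emit for this definition; the exact generated shape is an assumption based on the prefix key and the generator's usual conventions.

    typedef enum CpuS390Entitlement {
        S390_CPU_ENTITLEMENT_AUTO,
        S390_CPU_ENTITLEMENT_LOW,
        S390_CPU_ENTITLEMENT_MEDIUM,
        S390_CPU_ENTITLEMENT_HIGH,
        S390_CPU_ENTITLEMENT__MAX,
    } CpuS390Entitlement;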
diff --git a/qapi/machine-target.json b/qapi/machine-target.json
index f0a6b72414..4e55adbe00 100644
--- a/qapi/machine-target.json
+++ b/qapi/machine-target.json
@@ -4,6 +4,8 @@
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
+{ 'include': 'machine-common.json' }
+
##
# @CpuModelInfo:
#
@@ -361,3 +363,122 @@
'TARGET_MIPS',
'TARGET_LOONGARCH64',
'TARGET_RISCV' ] } }
+
+##
+# @CpuS390Polarization:
+#
+# An enumeration of CPU polarization that can be assumed by a virtual
+# S390 CPU
+#
+# Since: 8.2
+##
+{ 'enum': 'CpuS390Polarization',
+ 'prefix': 'S390_CPU_POLARIZATION',
+ 'data': [ 'horizontal', 'vertical' ],
+ 'if': 'TARGET_S390X'
+}
+
+##
+# @set-cpu-topology:
+#
+# Modify the topology by moving the CPU inside the topology tree,
+# or by changing a modifier attribute of a CPU.
+# Absent values will not be modified.
+#
+# @core-id: the vCPU ID to be moved
+#
+# @socket-id: destination socket to move the vCPU to
+#
+# @book-id: destination book to move the vCPU to
+#
+# @drawer-id: destination drawer to move the vCPU to
+#
+# @entitlement: entitlement to set
+#
+# @dedicated: whether the real CPU backing this virtual CPU is dedicated to it
+#
+# Features:
+#
+# @unstable: This command is experimental.
+#
+# Returns: Nothing on success.
+#
+# Since: 8.2
+##
+{ 'command': 'set-cpu-topology',
+ 'data': {
+ 'core-id': 'uint16',
+ '*socket-id': 'uint16',
+ '*book-id': 'uint16',
+ '*drawer-id': 'uint16',
+ '*entitlement': 'CpuS390Entitlement',
+ '*dedicated': 'bool'
+ },
+ 'features': [ 'unstable' ],
+ 'if': { 'all': [ 'TARGET_S390X' , 'CONFIG_KVM' ] }
+}
+
+##
+# @CPU_POLARIZATION_CHANGE:
+#
+# Emitted when the guest asks to change the polarization.
+#
+# The guest can tell the host (via the PTF instruction) whether the
+# CPUs should be provisioned using horizontal or vertical polarization.
+#
+# On horizontal polarization the host is expected to provision all vCPUs
+# equally.
+#
+# On vertical polarization the host can provision each vCPU differently.
+# The guest will get information on the details of the provisioning
+# the next time it uses the STSI(15) instruction.
+#
+# @polarization: polarization specified by the guest
+#
+# Features:
+#
+# @unstable: This event is experimental.
+#
+# Since: 8.2
+#
+# Example:
+#
+# <- { "event": "CPU_POLARIZATION_CHANGE",
+# "data": { "polarization": "horizontal" },
+# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } }
+##
+{ 'event': 'CPU_POLARIZATION_CHANGE',
+ 'data': { 'polarization': 'CpuS390Polarization' },
+ 'features': [ 'unstable' ],
+ 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] }
+}
+
+##
+# @CpuPolarizationInfo:
+#
+# The result of a CPU polarization query.
+#
+# @polarization: the CPU polarization
+#
+# Since: 8.2
+##
+{ 'struct': 'CpuPolarizationInfo',
+ 'data': { 'polarization': 'CpuS390Polarization' },
+ 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] }
+}
+
+##
+# @query-s390x-cpu-polarization:
+#
+# Features:
+#
+# @unstable: This command is experimental.
+#
+# Returns: the machine's CPU polarization
+#
+# Since: 8.2
+##
+{ 'command': 'query-s390x-cpu-polarization', 'returns': 'CpuPolarizationInfo',
+ 'features': [ 'unstable' ],
+ 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] }
+}
diff --git a/qapi/machine.json b/qapi/machine.json
index a08b6576ca..6c9d2f6dcf 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -9,6 +9,7 @@
##
{ 'include': 'common.json' }
+{ 'include': 'machine-common.json' }
##
# @SysEmuTarget:
@@ -56,9 +57,16 @@
#
# @cpu-state: the virtual CPU's state
#
+# @dedicated: the virtual CPU's dedication (since 8.2)
+#
+# @entitlement: the virtual CPU's entitlement (since 8.2)
+#
# Since: 2.12
##
-{ 'struct': 'CpuInfoS390', 'data': { 'cpu-state': 'CpuS390State' } }
+{ 'struct': 'CpuInfoS390',
+ 'data': { 'cpu-state': 'CpuS390State',
+ '*dedicated': 'bool',
+ '*entitlement': 'CpuS390Entitlement' } }
##
# @CpuInfoFast:
@@ -71,8 +79,7 @@
#
# @thread-id: ID of the underlying host thread
#
-# @props: properties describing to which node/socket/core/thread
-# virtual CPU belongs to, provided if supported by board
+# @props: properties associated with a virtual CPU, e.g. the socket id
#
# @target: the QEMU system emulation target, which determines which
# additional fields will be listed (since 3.0)
@@ -899,29 +906,46 @@
# should be passed by management with device_add command when a CPU is
# being hotplugged.
#
+# Which members are optional and which mandatory depends on the
+# architecture and board.
+#
+# For s390x see :ref:`cpu-topology-s390x`.
+#
+# The ids other than the node-id specify the position of the CPU
+# within the CPU topology (as defined by the machine property "smp";
+# see also type @SMPConfiguration).
+#
# @node-id: NUMA node ID the CPU belongs to
#
-# @socket-id: socket number within node/board the CPU belongs to
+# @drawer-id: drawer number within CPU topology the CPU belongs to
+# (since 8.2)
#
-# @die-id: die number within socket the CPU belongs to (since 4.1)
+# @book-id: book number within parent container the CPU belongs to
+# (since 8.2)
#
-# @cluster-id: cluster number within die the CPU belongs to (since
-# 7.1)
+# @socket-id: socket number within parent container the CPU belongs to
#
-# @core-id: core number within cluster the CPU belongs to
+# @die-id: die number within the parent container the CPU belongs to
+# (since 4.1)
#
-# @thread-id: thread number within core the CPU belongs to
+# @cluster-id: cluster number within the parent container the CPU
+# belongs to (since 7.1)
#
-# Note: currently there are 6 properties that could be present but
-# management should be prepared to pass through other properties
-# with device_add command to allow for future interface extension.
-# This also requires the filed names to be kept in sync with the
-# properties passed to -device/device_add.
+# @core-id: core number within the parent container the CPU
+# belongs to
+#
+# @thread-id: thread number within the core the CPU belongs to
+#
+# Note: management should be prepared to pass through additional
+# properties with device_add.
#
# Since: 2.7
##
{ 'struct': 'CpuInstanceProperties',
+ # Keep these in sync with the properties device_add accepts
'data': { '*node-id': 'int',
+ '*drawer-id': 'int',
+ '*book-id': 'int',
'*socket-id': 'int',
'*die-id': 'int',
'*cluster-id': 'int',
@@ -1478,26 +1502,43 @@
# Schema for CPU topology configuration. A missing value lets QEMU
# figure out a suitable value based on the ones that are provided.
#
-# @cpus: number of virtual CPUs in the virtual machine
-#
-# @sockets: number of sockets in the CPU topology
+# The members other than @cpus and @maxcpus define a topology of
+# containers.
#
-# @dies: number of dies per socket in the CPU topology
+# The ordering from highest/coarsest to lowest/finest is:
+# @drawers, @books, @sockets, @dies, @clusters, @cores, @threads.
#
-# @clusters: number of clusters per die in the CPU topology (since
-# 7.0)
+# Different architectures support different subsets of topology
+# containers.
#
-# @cores: number of cores per cluster in the CPU topology
+# For example, s390x does not have clusters and dies, and the socket
+# is the parent container of cores.
#
-# @threads: number of threads per core in the CPU topology
+# @cpus: number of virtual CPUs in the virtual machine
#
# @maxcpus: maximum number of hotpluggable virtual CPUs in the virtual
# machine
#
+# @drawers: number of drawers in the CPU topology (since 8.2)
+#
+# @books: number of books in the CPU topology (since 8.2)
+#
+# @sockets: number of sockets per parent container
+#
+# @dies: number of dies per parent container
+#
+# @clusters: number of clusters per parent container (since 7.0)
+#
+# @cores: number of cores per parent container
+#
+# @threads: number of threads per core
+#
# Since: 6.1
##
{ 'struct': 'SMPConfiguration', 'data': {
'*cpus': 'int',
+ '*drawers': 'int',
+ '*books': 'int',
'*sockets': 'int',
'*dies': 'int',
'*clusters': 'int',
diff --git a/qapi/meson.build b/qapi/meson.build
index 60a668b343..f81a37565c 100644
--- a/qapi/meson.build
+++ b/qapi/meson.build
@@ -36,6 +36,7 @@ qapi_all_modules = [
'error',
'introspect',
'job',
+ 'machine-common',
'machine',
'machine-target',
'migration',
diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json
index 6594afba31..c01ec335e6 100644
--- a/qapi/qapi-schema.json
+++ b/qapi/qapi-schema.json
@@ -66,6 +66,7 @@
{ 'include': 'introspect.json' }
{ 'include': 'qom.json' }
{ 'include': 'qdev.json' }
+{ 'include': 'machine-common.json' }
{ 'include': 'machine.json' }
{ 'include': 'machine-target.json' }
{ 'include': 'replay.json' }
diff --git a/qemu-options.hx b/qemu-options.hx
index 54a7e94970..e26230bac5 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -272,11 +272,14 @@ SRST
ERST
DEF("smp", HAS_ARG, QEMU_OPTION_smp,
- "-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,clusters=clusters][,cores=cores][,threads=threads]\n"
+ "-smp [[cpus=]n][,maxcpus=maxcpus][,drawers=drawers][,books=books][,sockets=sockets]\n"
+ " [,dies=dies][,clusters=clusters][,cores=cores][,threads=threads]\n"
" set the number of initial CPUs to 'n' [default=1]\n"
" maxcpus= maximum number of total CPUs, including\n"
" offline CPUs for hotplug, etc\n"
- " sockets= number of sockets on the machine board\n"
+ " drawers= number of drawers on the machine board\n"
+ " books= number of books in one drawer\n"
+ " sockets= number of sockets in one book\n"
" dies= number of dies in one socket\n"
" clusters= number of clusters in one die\n"
" cores= number of cores in one cluster\n"
diff --git a/roms/seabios-hppa b/roms/seabios-hppa
-Subproject 763e3b73499db5fef94087bd310bfc8ccbcf785
+Subproject fd5b6cf82369a1e53d68302fb6ede2b9e2afccd
diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py
index bf5716b5f3..5412716617 100644
--- a/scripts/qapi/gen.py
+++ b/scripts/qapi/gen.py
@@ -13,8 +13,8 @@
from contextlib import contextmanager
import os
-import sys
import re
+import sys
from typing import (
Dict,
Iterator,
diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py
index 22e7bcc4b1..bf31018aef 100644
--- a/scripts/qapi/parser.py
+++ b/scripts/qapi/parser.py
@@ -22,6 +22,7 @@ from typing import (
Dict,
List,
Mapping,
+ Match,
Optional,
Set,
Union,
@@ -563,11 +564,11 @@ class QAPIDoc:
self._switch_section(QAPIDoc.NullSection(self._parser))
@staticmethod
- def _match_at_name_colon(string: str):
+ def _match_at_name_colon(string: str) -> Optional[Match[str]]:
return re.match(r'@([^:]*): *', string)
@staticmethod
- def _match_section_tag(string: str):
+ def _match_section_tag(string: str) -> Optional[Match[str]]:
return re.match(r'(Returns|Since|Notes?|Examples?|TODO): *', string)
def _append_body_line(self, line: str) -> None:
diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py
index 231ebf61ba..d739e558e9 100644
--- a/scripts/qapi/schema.py
+++ b/scripts/qapi/schema.py
@@ -73,6 +73,11 @@ class QAPISchemaEntity:
self.features = features or []
self._checked = False
+ def __repr__(self):
+ if self.name is None:
+ return "<%s at 0x%x>" % (type(self).__name__, id(self))
+ return "<%s:%s at 0x%x>" % type(self).__name__, self.name, id(self)
+
def c_name(self):
return c_name(self.name)
diff --git a/stubs/migr-blocker.c b/stubs/migr-blocker.c
index 5676a2f93c..17a5dbf87b 100644
--- a/stubs/migr-blocker.c
+++ b/stubs/migr-blocker.c
@@ -1,11 +1,11 @@
#include "qemu/osdep.h"
#include "migration/blocker.h"
-int migrate_add_blocker(Error *reason, Error **errp)
+int migrate_add_blocker(Error **reasonp, Error **errp)
{
return 0;
}
-void migrate_del_blocker(Error *reason)
+void migrate_del_blocker(Error **reasonp)
{
}
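A hedged usage sketch for the pointer-to-pointer blocker API: on success, ownership of the Error moves into the blocker list (on failure it is consumed), and deletion both frees and NULLs the caller's handle. Names are illustrative.

    static Error *my_blocker;

    static int block_my_device(Error **errp)
    {
        error_setg(&my_blocker, "device does not support migration");
        return migrate_add_blocker(&my_blocker, errp);
    }

    static void unblock_my_device(void)
    {
        /* frees *reasonp and resets it to NULL; safe to call twice */
        migrate_del_blocker(&my_blocker);
    }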
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index b36a42a7ca..c2352904f0 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -65,7 +65,8 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
/* Feature 16 is reserved for VHOST_USER_PROTOCOL_F_STATUS. */
- VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 17,
+ /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
+ VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
VHOST_USER_PROTOCOL_F_MAX
};
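Since the protocol feature constants are bit positions, a back-end probing for the renumbered flag tests the negotiated mask as below; this is a generic sketch, not a libvhost-user helper.

    static bool backend_has_shared_object(uint64_t protocol_features)
    {
        return protocol_features &
               (1ULL << VHOST_USER_PROTOCOL_F_SHARED_OBJECT);
    }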
diff --git a/system/memory.c b/system/memory.c
index a800fbc9e5..4928f2525d 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -1535,7 +1535,12 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
adjust_endianness(mr, &data, op);
- if ((!kvm_eventfds_enabled()) &&
+ /*
+ * FIXME: it's not clear why under KVM the write would be processed
+ * directly, instead of going through eventfd. This probably should
+ * test "tcg_enabled() || qtest_enabled()", or should just go away.
+ */
+ if (!kvm_enabled() &&
memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
return MEMTX_OK;
}
@@ -2550,8 +2555,6 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr)
}
}
-static bool userspace_eventfd_warning;
-
void memory_region_add_eventfd(MemoryRegion *mr,
hwaddr addr,
unsigned size,
@@ -2568,13 +2571,6 @@ void memory_region_add_eventfd(MemoryRegion *mr,
};
unsigned i;
- if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
- userspace_eventfd_warning))) {
- userspace_eventfd_warning = true;
- error_report("Using eventfd without MMIO binding in KVM. "
- "Suboptimal performance expected");
- }
-
if (size) {
adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
}
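A hedged sketch of the ioeventfd registration this path serves; the memory_region_add_eventfd() signature is taken from include/exec/memory.h as of this series, and the offset and size are illustrative.

    static void add_doorbell(MemoryRegion *mr, EventNotifier *notifier)
    {
        event_notifier_init(notifier, 0);
        /* 4-byte doorbell at offset 0x40 of the region, matching any value */
        memory_region_add_eventfd(mr, 0x40, 4, false /* match_data */, 0,
                                  notifier);
    }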
diff --git a/system/memory_mapping.c b/system/memory_mapping.c
index d7f1d096e0..6f884c5b90 100644
--- a/system/memory_mapping.c
+++ b/system/memory_mapping.c
@@ -291,7 +291,7 @@ void guest_phys_blocks_append(GuestPhysBlockList *list)
memory_listener_unregister(&g.listener);
}
-static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
+static CPUState *find_paging_enabled_cpu(void)
{
CPUState *cpu;
@@ -304,26 +304,24 @@ static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
return NULL;
}
-void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+bool qemu_get_guest_memory_mapping(MemoryMappingList *list,
const GuestPhysBlockList *guest_phys_blocks,
Error **errp)
{
+ ERRP_GUARD();
CPUState *cpu, *first_paging_enabled_cpu;
GuestPhysBlock *block;
ram_addr_t offset, length;
- first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
+ first_paging_enabled_cpu = find_paging_enabled_cpu();
if (first_paging_enabled_cpu) {
for (cpu = first_paging_enabled_cpu; cpu != NULL;
cpu = CPU_NEXT(cpu)) {
- Error *err = NULL;
- cpu_get_memory_mapping(cpu, list, &err);
- if (err) {
- error_propagate(errp, err);
- return;
+ if (!cpu_get_memory_mapping(cpu, list, errp)) {
+ return false;
}
}
- return;
+ return true;
}
/*
@@ -335,6 +333,7 @@ void qemu_get_guest_memory_mapping(MemoryMappingList *list,
length = block->target_end - block->target_start;
create_new_memory_mapping(list, offset, offset, length);
}
+ return true;
}
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
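A hedged caller sketch for the new bool contract: errors now propagate through errp and the return value together, so the local Error shuffling removed above is no longer needed.

    static bool collect_guest_mappings(MemoryMappingList *list,
                                       GuestPhysBlockList *blocks,
                                       Error **errp)
    {
        return qemu_get_guest_memory_mapping(list, blocks, errp);
    }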
diff --git a/system/qtest.c b/system/qtest.c
index 35b643a274..7964f0b248 100644
--- a/system/qtest.c
+++ b/system/qtest.c
@@ -866,7 +866,7 @@ void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **
{
ERRP_GUARD();
Chardev *chr;
- Object *qtest;
+ Object *qobj;
chr = qemu_chr_new("qtest", qtest_chrdev, NULL);
if (chr == NULL) {
@@ -875,18 +875,18 @@ void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **
return;
}
- qtest = object_new(TYPE_QTEST);
- object_property_set_str(qtest, "chardev", chr->label, &error_abort);
+ qobj = object_new(TYPE_QTEST);
+ object_property_set_str(qobj, "chardev", chr->label, &error_abort);
if (qtest_log) {
- object_property_set_str(qtest, "log", qtest_log, &error_abort);
+ object_property_set_str(qobj, "log", qtest_log, &error_abort);
}
- object_property_add_child(qdev_get_machine(), "qtest", qtest);
- user_creatable_complete(USER_CREATABLE(qtest), errp);
+ object_property_add_child(qdev_get_machine(), "qtest", qobj);
+ user_creatable_complete(USER_CREATABLE(qobj), errp);
if (*errp) {
- object_unparent(qtest);
+ object_unparent(qobj);
}
object_unref(OBJECT(chr));
- object_unref(qtest);
+ object_unref(qobj);
}
static bool qtest_server_start(QTest *q, Error **errp)
diff --git a/system/vl.c b/system/vl.c
index 3100ac01ed..92d29bf521 100644
--- a/system/vl.c
+++ b/system/vl.c
@@ -727,6 +727,12 @@ static QemuOptsList qemu_smp_opts = {
.name = "cpus",
.type = QEMU_OPT_NUMBER,
}, {
+ .name = "drawers",
+ .type = QEMU_OPT_NUMBER,
+ }, {
+ .name = "books",
+ .type = QEMU_OPT_NUMBER,
+ }, {
.name = "sockets",
.type = QEMU_OPT_NUMBER,
}, {
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index 326a03153d..c078849403 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -65,60 +65,9 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
/* Initialize the cpu we are turning on */
cpu_reset(target_cpu_state);
+ arm_emulate_firmware_reset(target_cpu_state, info->target_el);
target_cpu_state->halted = 0;
- if (info->target_aa64) {
- if ((info->target_el < 3) && arm_feature(&target_cpu->env,
- ARM_FEATURE_EL3)) {
- /*
- * As target mode is AArch64, we need to set lower
- * exception level (the requested level 2) to AArch64
- */
- target_cpu->env.cp15.scr_el3 |= SCR_RW;
- }
-
- if ((info->target_el < 2) && arm_feature(&target_cpu->env,
- ARM_FEATURE_EL2)) {
- /*
- * As target mode is AArch64, we need to set lower
- * exception level (the requested level 1) to AArch64
- */
- target_cpu->env.cp15.hcr_el2 |= HCR_RW;
- }
-
- target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
- } else {
- /* We are requested to boot in AArch32 mode */
- static const uint32_t mode_for_el[] = { 0,
- ARM_CPU_MODE_SVC,
- ARM_CPU_MODE_HYP,
- ARM_CPU_MODE_SVC };
-
- cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
- CPSRWriteRaw);
- }
-
- if (info->target_el == 3) {
- /* Processor is in secure mode */
- target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
- } else {
- /* Processor is not in secure mode */
- target_cpu->env.cp15.scr_el3 |= SCR_NS;
-
- /* Set NSACR.{CP11,CP10} so NS can access the FPU */
- target_cpu->env.cp15.nsacr |= 3 << 10;
-
- /*
- * If QEMU is providing the equivalent of EL3 firmware, then we need
- * to make sure a CPU targeting EL2 comes out of reset with a
- * functional HVC insn.
- */
- if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
- && info->target_el == 2) {
- target_cpu->env.cp15.scr_el3 |= SCR_HCE;
- }
- }
-
/* We check if the started CPU is now at the correct level */
assert(info->target_el == arm_current_el(&target_cpu->env));
diff --git a/target/arm/common-semi-target.h b/target/arm/common-semi-target.h
index 19438ed8cd..da51f2d7f5 100644
--- a/target/arm/common-semi-target.h
+++ b/target/arm/common-semi-target.h
@@ -10,9 +10,7 @@
#ifndef TARGET_ARM_COMMON_SEMI_TARGET_H
#define TARGET_ARM_COMMON_SEMI_TARGET_H
-#ifndef CONFIG_USER_ONLY
-#include "hw/arm/boot.h"
-#endif
+#include "target/arm/cpu-qom.h"
static inline target_ulong common_semi_arg(CPUState *cs, int argno)
{
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index 514c22ced9..d06c08a734 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -23,8 +23,6 @@
#include "hw/core/cpu.h"
#include "qom/object.h"
-struct arm_boot_info;
-
#define TYPE_ARM_CPU "arm-cpu"
OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 6c6c551573..aa4e006f21 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -553,6 +553,101 @@ static void arm_cpu_reset_hold(Object *obj)
}
}
+void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
+{
+ ARMCPU *cpu = ARM_CPU(cpustate);
+ CPUARMState *env = &cpu->env;
+ bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
+ bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);
+
+ /*
+ * Check we have the EL we're aiming for. If that is the
+ * highest implemented EL, then cpu_reset has already done
+ * all the work.
+ */
+ switch (target_el) {
+ case 3:
+ assert(have_el3);
+ return;
+ case 2:
+ assert(have_el2);
+ if (!have_el3) {
+ return;
+ }
+ break;
+ case 1:
+ if (!have_el3 && !have_el2) {
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (have_el3) {
+ /*
+ * Set the EL3 state so code can run at EL2. This should match
+ * the requirements set by Linux in its booting spec.
+ */
+ if (env->aarch64) {
+ env->cp15.scr_el3 |= SCR_RW;
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ env->cp15.scr_el3 |= SCR_API | SCR_APK;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ env->cp15.scr_el3 |= SCR_ATA;
+ }
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
+ env->vfp.zcr_el[3] = 0xf;
+ }
+ if (cpu_isar_feature(aa64_sme, cpu)) {
+ env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
+ env->cp15.scr_el3 |= SCR_ENTP2;
+ env->vfp.smcr_el[3] = 0xf;
+ }
+ if (cpu_isar_feature(aa64_hcx, cpu)) {
+ env->cp15.scr_el3 |= SCR_HXEN;
+ }
+ if (cpu_isar_feature(aa64_fgt, cpu)) {
+ env->cp15.scr_el3 |= SCR_FGTEN;
+ }
+ }
+
+ if (target_el == 2) {
+ /* If the guest is at EL2 then Linux expects the HVC insn to work */
+ env->cp15.scr_el3 |= SCR_HCE;
+ }
+
+ /* Put CPU into non-secure state */
+ env->cp15.scr_el3 |= SCR_NS;
+ /* Set NSACR.{CP11,CP10} so NS can access the FPU */
+ env->cp15.nsacr |= 3 << 10;
+ }
+
+ if (have_el2 && target_el < 2) {
+ /* Set EL2 state so code can run at EL1. */
+ if (env->aarch64) {
+ env->cp15.hcr_el2 |= HCR_RW;
+ }
+ }
+
+ /* Set the CPU to the desired state */
+ if (env->aarch64) {
+ env->pstate = aarch64_pstate_mode(target_el, true);
+ } else {
+ static const uint32_t mode_for_el[] = {
+ 0,
+ ARM_CPU_MODE_SVC,
+ ARM_CPU_MODE_HYP,
+ ARM_CPU_MODE_SVC,
+ };
+
+ cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
+ }
+}
+
+
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a9edfb8353..76d4cef9e3 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1149,6 +1149,28 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, DumpState *s);
+/**
+ * arm_emulate_firmware_reset: Emulate firmware CPU reset handling
+ * @cpustate: CPU (which must have been freshly reset)
+ * @target_el: exception level to put the CPU into
+ *
+ * When QEMU is directly running a guest kernel at a lower level than
+ * EL3 it implicitly emulates some aspects of the guest firmware.
+ * This includes that on reset we need to configure the parts of the
+ * CPU corresponding to EL3 so that the real guest code can run at its
+ * lower exception level. This function does that post-reset CPU setup,
+ * for when we do direct boot of a guest kernel, and for when we
+ * emulate PSCI and similar firmware interfaces starting a CPU at a
+ * lower exception level.
+ *
+ * @target_el must be an EL implemented by the CPU, in the range 1 to 3.
+ * We do not support dropping into a Secure EL other than 3.
+ *
+ * It is the responsibility of the caller to call arm_rebuild_hflags().
+ */
+void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
+
#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
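Per the contract above, the helper sits between a fresh reset and the caller's hflags rebuild. A sketch of the intended call sequence (the wrapper function is hypothetical):

    static void start_cpu_at_el1(CPUState *cs)
    {
        cpu_reset(cs);                          /* helper requires a freshly reset CPU */
        arm_emulate_firmware_reset(cs, 1);      /* set up EL3/EL2 state for an EL1 guest */
        arm_rebuild_hflags(&ARM_CPU(cs)->env);  /* explicitly the caller's job */
    }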
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 74fbb6e1d7..b29edb26af 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1283,7 +1283,7 @@ static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
- if (hpmn != 0 && counter >= hpmn) {
+ if (counter >= hpmn) {
return hlp;
}
}
@@ -2475,22 +2475,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
return CP_ACCESS_TRAP;
}
-
- /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
- if (hcr & HCR_E2H) {
- if (timeridx == GTIMER_PHYS &&
- !extract32(env->cp15.cnthctl_el2, 10, 1)) {
- return CP_ACCESS_TRAP_EL2;
- }
- } else {
- /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
- if (has_el2 && timeridx == GTIMER_PHYS &&
- !extract32(env->cp15.cnthctl_el2, 1, 1)) {
- return CP_ACCESS_TRAP_EL2;
- }
- }
- break;
-
+ /* fall through */
case 1:
/* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
if (has_el2 && timeridx == GTIMER_PHYS &&
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index b66b936a95..7903e2ddde 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -553,24 +553,19 @@ bool write_kvmstate_to_list(ARMCPU *cpu)
bool ok = true;
for (i = 0; i < cpu->cpreg_array_len; i++) {
- struct kvm_one_reg r;
uint64_t regidx = cpu->cpreg_indexes[i];
uint32_t v32;
int ret;
- r.id = regidx;
-
switch (regidx & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U32:
- r.addr = (uintptr_t)&v32;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ ret = kvm_get_one_reg(cs, regidx, &v32);
if (!ret) {
cpu->cpreg_values[i] = v32;
}
break;
case KVM_REG_SIZE_U64:
- r.addr = (uintptr_t)(cpu->cpreg_values + i);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);
break;
default:
g_assert_not_reached();
@@ -589,7 +584,6 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
bool ok = true;
for (i = 0; i < cpu->cpreg_array_len; i++) {
- struct kvm_one_reg r;
uint64_t regidx = cpu->cpreg_indexes[i];
uint32_t v32;
int ret;
@@ -598,19 +592,17 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
continue;
}
- r.id = regidx;
switch (regidx & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U32:
v32 = cpu->cpreg_values[i];
- r.addr = (uintptr_t)&v32;
+ ret = kvm_set_one_reg(cs, regidx, &v32);
break;
case KVM_REG_SIZE_U64:
- r.addr = (uintptr_t)(cpu->cpreg_values + i);
+ ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i);
break;
default:
g_assert_not_reached();
}
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
if (ret) {
/* We might fail for "unknown register" and also for
* "you tried to set a register which is constant with
@@ -709,17 +701,13 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
void kvm_arm_get_virtual_time(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
- struct kvm_one_reg reg = {
- .id = KVM_REG_ARM_TIMER_CNT,
- .addr = (uintptr_t)&cpu->kvm_vtime,
- };
int ret;
if (cpu->kvm_vtime_dirty) {
return;
}
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
if (ret) {
error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
abort();
@@ -731,17 +719,13 @@ void kvm_arm_get_virtual_time(CPUState *cs)
void kvm_arm_put_virtual_time(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
- struct kvm_one_reg reg = {
- .id = KVM_REG_ARM_TIMER_CNT,
- .addr = (uintptr_t)&cpu->kvm_vtime,
- };
int ret;
if (!cpu->kvm_vtime_dirty) {
return;
}
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
if (ret) {
error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
abort();
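All the conversions in this file and in kvm64.c below rely on the kvm_get_one_reg()/kvm_set_one_reg() accessors, which wrap the struct kvm_one_reg boilerplate each call site used to open-code; roughly (modulo error tracing in the real helpers):

    int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
    {
        struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)target };
        return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    }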
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 5e95c496bb..4bb68646e4 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -30,7 +30,6 @@
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
-#include "hw/arm/virt.h"
static bool have_guest_debug;
@@ -540,14 +539,10 @@ static int kvm_arm_sve_set_vls(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
- struct kvm_one_reg reg = {
- .id = KVM_REG_ARM64_SVE_VLS,
- .addr = (uint64_t)&vls[0],
- };
assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
- return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]);
}
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
@@ -726,19 +721,17 @@ static void kvm_inject_arm_sea(CPUState *c)
static int kvm_arch_put_fpsimd(CPUState *cs)
{
CPUARMState *env = &ARM_CPU(cs)->env;
- struct kvm_one_reg reg;
int i, ret;
for (i = 0; i < 32; i++) {
uint64_t *q = aa64_vfp_qreg(env, i);
#if HOST_BIG_ENDIAN
uint64_t fp_val[2] = { q[1], q[0] };
- reg.addr = (uintptr_t)fp_val;
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
+ fp_val);
#else
- reg.addr = (uintptr_t)q;
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
#endif
- reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret) {
return ret;
}
@@ -759,14 +752,11 @@ static int kvm_arch_put_sve(CPUState *cs)
CPUARMState *env = &cpu->env;
uint64_t tmp[ARM_MAX_VQ * 2];
uint64_t *r;
- struct kvm_one_reg reg;
int n, ret;
for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
- reg.addr = (uintptr_t)r;
- reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
if (ret) {
return ret;
}
@@ -775,9 +765,7 @@ static int kvm_arch_put_sve(CPUState *cs)
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
- reg.addr = (uintptr_t)r;
- reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
if (ret) {
return ret;
}
@@ -785,9 +773,7 @@ static int kvm_arch_put_sve(CPUState *cs)
r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
- reg.addr = (uintptr_t)r;
- reg.id = KVM_REG_ARM64_SVE_FFR(0);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
}
@@ -797,7 +783,6 @@ static int kvm_arch_put_sve(CPUState *cs)
int kvm_arch_put_registers(CPUState *cs, int level)
{
- struct kvm_one_reg reg;
uint64_t val;
uint32_t fpr;
int i, ret;
@@ -814,9 +799,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
for (i = 0; i < 31; i++) {
- reg.id = AARCH64_CORE_REG(regs.regs[i]);
- reg.addr = (uintptr_t) &env->xregs[i];
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
+ &env->xregs[i]);
if (ret) {
return ret;
}
@@ -827,16 +811,12 @@ int kvm_arch_put_registers(CPUState *cs, int level)
*/
aarch64_save_sp(env, 1);
- reg.id = AARCH64_CORE_REG(regs.sp);
- reg.addr = (uintptr_t) &env->sp_el[0];
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
if (ret) {
return ret;
}
- reg.id = AARCH64_CORE_REG(sp_el1);
- reg.addr = (uintptr_t) &env->sp_el[1];
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
if (ret) {
return ret;
}
@@ -847,23 +827,17 @@ int kvm_arch_put_registers(CPUState *cs, int level)
} else {
val = cpsr_read(env);
}
- reg.id = AARCH64_CORE_REG(regs.pstate);
- reg.addr = (uintptr_t) &val;
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
if (ret) {
return ret;
}
- reg.id = AARCH64_CORE_REG(regs.pc);
- reg.addr = (uintptr_t) &env->pc;
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
if (ret) {
return ret;
}
- reg.id = AARCH64_CORE_REG(elr_el1);
- reg.addr = (uintptr_t) &env->elr_el[1];
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
if (ret) {
return ret;
}
@@ -882,9 +856,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
/* KVM 0-4 map to QEMU banks 1-5 */
for (i = 0; i < KVM_NR_SPSR; i++) {
- reg.id = AARCH64_CORE_REG(spsr[i]);
- reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
+ &env->banked_spsr[i + 1]);
if (ret) {
return ret;
}
@@ -899,18 +872,14 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
- reg.addr = (uintptr_t)(&fpr);
fpr = vfp_get_fpsr(env);
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
if (ret) {
return ret;
}
- reg.addr = (uintptr_t)(&fpr);
fpr = vfp_get_fpcr(env);
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
if (ret) {
return ret;
}
@@ -939,14 +908,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
static int kvm_arch_get_fpsimd(CPUState *cs)
{
CPUARMState *env = &ARM_CPU(cs)->env;
- struct kvm_one_reg reg;
int i, ret;
for (i = 0; i < 32; i++) {
uint64_t *q = aa64_vfp_qreg(env, i);
- reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
- reg.addr = (uintptr_t)q;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
if (ret) {
return ret;
} else {
@@ -970,15 +936,12 @@ static int kvm_arch_get_sve(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- struct kvm_one_reg reg;
uint64_t *r;
int n, ret;
for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
r = &env->vfp.zregs[n].d[0];
- reg.addr = (uintptr_t)r;
- reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
if (ret) {
return ret;
}
@@ -987,9 +950,7 @@ static int kvm_arch_get_sve(CPUState *cs)
for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
r = &env->vfp.pregs[n].p[0];
- reg.addr = (uintptr_t)r;
- reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
if (ret) {
return ret;
}
@@ -997,9 +958,7 @@ static int kvm_arch_get_sve(CPUState *cs)
}
r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
- reg.addr = (uintptr_t)r;
- reg.id = KVM_REG_ARM64_SVE_FFR(0);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
if (ret) {
return ret;
}
@@ -1010,7 +969,6 @@ static int kvm_arch_get_sve(CPUState *cs)
int kvm_arch_get_registers(CPUState *cs)
{
- struct kvm_one_reg reg;
uint64_t val;
unsigned int el;
uint32_t fpr;
@@ -1020,31 +978,24 @@ int kvm_arch_get_registers(CPUState *cs)
CPUARMState *env = &cpu->env;
for (i = 0; i < 31; i++) {
- reg.id = AARCH64_CORE_REG(regs.regs[i]);
- reg.addr = (uintptr_t) &env->xregs[i];
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
+ &env->xregs[i]);
if (ret) {
return ret;
}
}
- reg.id = AARCH64_CORE_REG(regs.sp);
- reg.addr = (uintptr_t) &env->sp_el[0];
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
if (ret) {
return ret;
}
- reg.id = AARCH64_CORE_REG(sp_el1);
- reg.addr = (uintptr_t) &env->sp_el[1];
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
if (ret) {
return ret;
}
- reg.id = AARCH64_CORE_REG(regs.pstate);
- reg.addr = (uintptr_t) &val;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
if (ret) {
return ret;
}
@@ -1061,9 +1012,7 @@ int kvm_arch_get_registers(CPUState *cs)
*/
aarch64_restore_sp(env, 1);
- reg.id = AARCH64_CORE_REG(regs.pc);
- reg.addr = (uintptr_t) &env->pc;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
if (ret) {
return ret;
}
@@ -1077,9 +1026,7 @@ int kvm_arch_get_registers(CPUState *cs)
aarch64_sync_64_to_32(env);
}
- reg.id = AARCH64_CORE_REG(elr_el1);
- reg.addr = (uintptr_t) &env->elr_el[1];
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
if (ret) {
return ret;
}
@@ -1089,9 +1036,8 @@ int kvm_arch_get_registers(CPUState *cs)
* KVM SPSRs 0-4 map to QEMU banks 1-5
*/
for (i = 0; i < KVM_NR_SPSR; i++) {
- reg.id = AARCH64_CORE_REG(spsr[i]);
- reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
+ &env->banked_spsr[i + 1]);
if (ret) {
return ret;
}
@@ -1112,17 +1058,13 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
- reg.addr = (uintptr_t)(&fpr);
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
if (ret) {
return ret;
}
vfp_set_fpsr(env, fpr);
- reg.addr = (uintptr_t)(&fpr);
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
if (ret) {
return ret;
}
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 1f918ff537..0d5d8e307d 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -89,6 +89,10 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */
t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */
cpu->isar.id_dfr0 = t;
+
+ t = cpu->isar.id_dfr1;
+ t = FIELD_DP32(t, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */
+ cpu->isar.id_dfr1 = t;
}
/* CPU models. These are not needed for the AArch64 linux-user build. */
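FIELD_DP32() (hw/registerfields.h) deposits a value into a named bitfield, so the HPMN0 line above is roughly equivalent to:

    t = deposit32(t, R_ID_DFR1_HPMN0_SHIFT, R_ID_DFR1_HPMN0_LENGTH, 1);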
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 68928e5127..d978aa5f7a 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -1109,6 +1109,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = cpu->isar.id_aa64dfr0;
t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */
+ t = FIELD_DP64(t, ID_AA64DFR0, HPMN0, 1); /* FEAT_HPMN0 */
cpu->isar.id_aa64dfr0 = t;
t = cpu->isar.id_aa64smfr0;
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 10e8dcf743..ad78b8b120 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -1324,41 +1324,8 @@ static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
int extsize = extract32(option, 0, 2);
bool is_signed = extract32(option, 2, 1);
- if (is_signed) {
- switch (extsize) {
- case 0:
- tcg_gen_ext8s_i64(tcg_out, tcg_in);
- break;
- case 1:
- tcg_gen_ext16s_i64(tcg_out, tcg_in);
- break;
- case 2:
- tcg_gen_ext32s_i64(tcg_out, tcg_in);
- break;
- case 3:
- tcg_gen_mov_i64(tcg_out, tcg_in);
- break;
- }
- } else {
- switch (extsize) {
- case 0:
- tcg_gen_ext8u_i64(tcg_out, tcg_in);
- break;
- case 1:
- tcg_gen_ext16u_i64(tcg_out, tcg_in);
- break;
- case 2:
- tcg_gen_ext32u_i64(tcg_out, tcg_in);
- break;
- case 3:
- tcg_gen_mov_i64(tcg_out, tcg_in);
- break;
- }
- }
-
- if (shift) {
- tcg_gen_shli_i64(tcg_out, tcg_out, shift);
- }
+ tcg_gen_ext_i64(tcg_out, tcg_in, extsize | (is_signed ? MO_SIGN : 0));
+ tcg_gen_shli_i64(tcg_out, tcg_out, shift);
}
static inline void gen_check_sp_alignment(DisasContext *s)
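The one-line replacement works because extsize already matches the MemOp size encoding (MO_8..MO_64 are 0..3), OR-ing in MO_SIGN selects the signed variant, and tcg_gen_shli_i64() degenerates to a move for a zero shift count, so the old guard is unneeded. Illustrative equivalences:

    tcg_gen_ext_i64(out, in, MO_16);           /* == tcg_gen_ext16u_i64(out, in) */
    tcg_gen_ext_i64(out, in, MO_32 | MO_SIGN); /* == tcg_gen_ext32s_i64(out, in) */
    tcg_gen_ext_i64(out, in, MO_64);           /* == tcg_gen_mov_i64(out, in)    */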
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 48927fbb8c..b3660173d1 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -7882,7 +7882,7 @@ static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
}
}
-static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
+static bool op_stm(DisasContext *s, arg_ldst_block *a)
{
int i, j, n, list, mem_idx;
bool user = a->u;
@@ -7899,7 +7899,14 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
list = a->list;
n = ctpop16(list);
- if (n < min_n || a->rn == 15) {
+ /*
+ * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
+ * to UNDEF. In the T32 STM encoding, n == 1 is also UNPREDICTABLE,
+ * but hardware treats it like the A32 version and implements the
+ * single-register store, and some in-the-wild (buggy) software
+ * relies on that, so we don't UNDEF in that case.
+ */
+ if (n < 1 || a->rn == 15) {
unallocated_encoding(s);
return true;
}
@@ -7935,8 +7942,7 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
static bool trans_STM(DisasContext *s, arg_ldst_block *a)
{
- /* BitCount(list) < 1 is UNPREDICTABLE */
- return op_stm(s, a, 1);
+ return op_stm(s, a);
}
static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
@@ -7946,11 +7952,10 @@ static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
unallocated_encoding(s);
return true;
}
- /* BitCount(list) < 2 is UNPREDICTABLE */
- return op_stm(s, a, 2);
+ return op_stm(s, a);
}
-static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
+static bool do_ldm(DisasContext *s, arg_ldst_block *a)
{
int i, j, n, list, mem_idx;
bool loaded_base;
@@ -7979,7 +7984,14 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
list = a->list;
n = ctpop16(list);
- if (n < min_n || a->rn == 15) {
+ /*
+ * This is UNPREDICTABLE for n < 1 in all encodings, and we choose
+ * to UNDEF. In the T32 LDM encoding, n == 1 is also UNPREDICTABLE,
+ * but hardware treats it like the A32 version and implements the
+ * single-register load, and some in-the-wild (buggy) software
+ * relies on that, so we don't UNDEF in that case.
+ */
+ if (n < 1 || a->rn == 15) {
unallocated_encoding(s);
return true;
}
@@ -8045,8 +8057,7 @@ static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
unallocated_encoding(s);
return true;
}
- /* BitCount(list) < 1 is UNPREDICTABLE */
- return do_ldm(s, a, 1);
+ return do_ldm(s, a);
}
static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
@@ -8056,16 +8067,14 @@ static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
unallocated_encoding(s);
return true;
}
- /* BitCount(list) < 2 is UNPREDICTABLE */
- return do_ldm(s, a, 2);
+ return do_ldm(s, a);
}
static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
{
/* Writeback is conditional on the base register not being loaded. */
a->w = !(a->list & (1 << a->rn));
- /* BitCount(list) < 1 is UNPREDICTABLE */
- return do_ldm(s, a, 1);
+ return do_ldm(s, a);
}
static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
index 271cb5e41b..d1ff659128 100644
--- a/target/i386/arch_memory_mapping.c
+++ b/target/i386/arch_memory_mapping.c
@@ -266,7 +266,7 @@ static void walk_pml5e(MemoryMappingList *list, AddressSpace *as,
}
#endif
-void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
+bool x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
Error **errp)
{
X86CPU *cpu = X86_CPU(cs);
@@ -275,7 +275,7 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
if (!cpu_paging_enabled(cs)) {
/* paging is disabled */
- return;
+ return true;
}
a20_mask = x86_get_a20_mask(env);
@@ -310,5 +310,7 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
pse = !!(env->cr[4] & CR4_PSE_MASK);
walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
}
+
+ return true;
}
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index bdca901dfa..fc8484cb5e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -714,7 +714,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_RDSEED | \
- CPUID_7_0_EBX_KERNEL_FEATURES)
+ CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_KERNEL_FEATURES)
/* missing:
CPUID_7_0_EBX_HLE
CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM */
@@ -7377,7 +7377,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
return;
}
- if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
+ if (env->features[FEAT_1_EDX] & (CPUID_PSE36 | CPUID_PAE)) {
cpu->phys_bits = 36;
} else {
cpu->phys_bits = 32;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index e1875466b9..471e71dbc5 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2055,7 +2055,7 @@ int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
DumpState *s);
-void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+bool x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index ab72bcdfad..770e81d56e 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -91,6 +91,15 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_INFO(SET_TSS_ADDR),
KVM_CAP_INFO(EXT_CPUID),
KVM_CAP_INFO(MP_STATE),
+ KVM_CAP_INFO(SIGNAL_MSI),
+ KVM_CAP_INFO(IRQ_ROUTING),
+ KVM_CAP_INFO(DEBUGREGS),
+ KVM_CAP_INFO(XSAVE),
+ KVM_CAP_INFO(VCPU_EVENTS),
+ KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
+ KVM_CAP_INFO(MCE),
+ KVM_CAP_INFO(ADJUST_CLOCK),
+ KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
KVM_CAP_LAST_INFO
};
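Moving these capabilities into kvm_arch_required_capabilities turns them into hard requirements: generic KVM init walks this list and refuses to start if any entry is missing, which is what lets the per-capability has_* variables and their fallback paths below go away. Each entry is built by KVM_CAP_INFO, which expands approximately to:

    #define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }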
@@ -134,10 +143,8 @@ static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;
-static int has_xsave;
static int has_xsave2;
static int has_xcrs;
-static int has_pit_state2;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;
@@ -154,11 +161,6 @@ static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
-bool kvm_has_pit_state2(void)
-{
- return !!has_pit_state2;
-}
-
bool kvm_has_smm(void)
{
return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
@@ -171,11 +173,6 @@ bool kvm_has_adjust_clock_stable(void)
return (ret & KVM_CLOCK_TSC_STABLE);
}
-bool kvm_has_adjust_clock(void)
-{
- return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
-}
-
bool kvm_has_exception_payload(void)
{
return has_exception_payload;
@@ -577,14 +574,8 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
int *max_banks)
{
- int r;
-
- r = kvm_check_extension(s, KVM_CAP_MCE);
- if (r > 0) {
- *max_banks = r;
- return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
- }
- return -ENOSYS;
+ *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
+ return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
@@ -687,15 +678,6 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}
-static void kvm_reset_exception(CPUX86State *env)
-{
- env->exception_nr = -1;
- env->exception_pending = 0;
- env->exception_injected = 0;
- env->exception_has_payload = false;
- env->exception_payload = 0;
-}
-
static void kvm_queue_exception(CPUX86State *env,
int32_t exception_nr,
uint8_t exception_has_payload,
@@ -728,38 +710,6 @@ static void kvm_queue_exception(CPUX86State *env,
}
}
-static int kvm_inject_mce_oldstyle(X86CPU *cpu)
-{
- CPUX86State *env = &cpu->env;
-
- if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
- unsigned int bank, bank_num = env->mcg_cap & 0xff;
- struct kvm_x86_mce mce;
-
- kvm_reset_exception(env);
-
- /*
- * There must be at least one bank in use if an MCE is pending.
- * Find it and use its values for the event injection.
- */
- for (bank = 0; bank < bank_num; bank++) {
- if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
- break;
- }
- }
- assert(bank < bank_num);
-
- mce.bank = bank;
- mce.status = env->mce_banks[bank * 4 + 1];
- mce.mcg_status = env->mcg_status;
- mce.addr = env->mce_banks[bank * 4 + 2];
- mce.misc = env->mce_banks[bank * 4 + 3];
-
- return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
- }
- return 0;
-}
-
static void cpu_update_state(void *opaque, bool running, RunState state)
{
CPUX86State *env = opaque;
@@ -1603,7 +1553,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
error_setg(&hv_passthrough_mig_blocker,
"'hv-passthrough' CPU flag prevents migration, use explicit"
" set of hv-* flags instead");
- ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
+ ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
if (ret < 0) {
error_report_err(local_err);
return ret;
@@ -1617,7 +1567,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
" use explicit 'hv-no-nonarch-coresharing=on' instead (but"
" make sure SMT is disabled and/or that vCPUs are properly"
" pinned)");
- ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
+ ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
if (ret < 0) {
error_report_err(local_err);
return ret;
@@ -1711,10 +1661,8 @@ static void kvm_init_xsave(CPUX86State *env)
{
if (has_xsave2) {
env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
- } else if (has_xsave) {
- env->xsave_buf_len = sizeof(struct kvm_xsave);
} else {
- return;
+ env->xsave_buf_len = sizeof(struct kvm_xsave);
}
env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
@@ -2154,8 +2102,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (((env->cpuid_version >> 8)&0xF) >= 6
&& (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
- (CPUID_MCE | CPUID_MCA)
- && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
+ (CPUID_MCE | CPUID_MCA)) {
uint64_t mcg_cap, unsupported_caps;
int banks;
int ret;
@@ -2213,7 +2160,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
error_setg(&invtsc_mig_blocker,
"State blocked by non-migratable CPU device"
" (invtsc flag)");
- r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
+ r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
if (r < 0) {
error_report_err(local_err);
return r;
@@ -2271,7 +2218,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
return 0;
fail:
- migrate_del_blocker(invtsc_mig_blocker);
+ migrate_del_blocker(&invtsc_mig_blocker);
return r;
}
@@ -2589,14 +2536,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
return ret;
}
- if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
- error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
- return -ENOTSUP;
- }
-
- has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
- has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
@@ -2654,20 +2594,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
* In order to use vm86 mode, an EPT identity map and a TSS are needed.
* Since these must be part of guest physical memory, we need to allocate
* them, both by setting their start addresses in the kernel and by
- * creating a corresponding e820 entry. We need 4 pages before the BIOS.
- *
- * Older KVM versions may not support setting the identity map base. In
- * that case we need to stick with the default, i.e. a 256K maximum BIOS
- * size.
+ * creating a corresponding e820 entry. We need 4 pages before the BIOS,
+ * so this value allows BIOS images of up to 16M.
*/
- if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
- /* Allows up to 16M BIOSes. */
- identity_base = 0xfeffc000;
-
- ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
- if (ret < 0) {
- return ret;
- }
+ identity_base = 0xfeffc000;
+ ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+ if (ret < 0) {
+ return ret;
}
/* Set TSS base one page after EPT identity map. */
@@ -2879,40 +2812,11 @@ static int kvm_getput_regs(X86CPU *cpu, int set)
return ret;
}
-static int kvm_put_fpu(X86CPU *cpu)
-{
- CPUX86State *env = &cpu->env;
- struct kvm_fpu fpu;
- int i;
-
- memset(&fpu, 0, sizeof fpu);
- fpu.fsw = env->fpus & ~(7 << 11);
- fpu.fsw |= (env->fpstt & 7) << 11;
- fpu.fcw = env->fpuc;
- fpu.last_opcode = env->fpop;
- fpu.last_ip = env->fpip;
- fpu.last_dp = env->fpdp;
- for (i = 0; i < 8; ++i) {
- fpu.ftwx |= (!env->fptags[i]) << i;
- }
- memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
- for (i = 0; i < CPU_NB_REGS; i++) {
- stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
- stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
- }
- fpu.mxcsr = env->mxcsr;
-
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
-}
-
static int kvm_put_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
void *xsave = env->xsave_buf;
- if (!has_xsave) {
- return kvm_put_fpu(cpu);
- }
x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
@@ -3657,46 +3561,12 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
}
-static int kvm_get_fpu(X86CPU *cpu)
-{
- CPUX86State *env = &cpu->env;
- struct kvm_fpu fpu;
- int i, ret;
-
- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
- if (ret < 0) {
- return ret;
- }
-
- env->fpstt = (fpu.fsw >> 11) & 7;
- env->fpus = fpu.fsw;
- env->fpuc = fpu.fcw;
- env->fpop = fpu.last_opcode;
- env->fpip = fpu.last_ip;
- env->fpdp = fpu.last_dp;
- for (i = 0; i < 8; ++i) {
- env->fptags[i] = !((fpu.ftwx >> i) & 1);
- }
- memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
- for (i = 0; i < CPU_NB_REGS; i++) {
- env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
- env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
- }
- env->mxcsr = fpu.mxcsr;
-
- return 0;
-}
-
static int kvm_get_xsave(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
void *xsave = env->xsave_buf;
int type, ret;
- if (!has_xsave) {
- return kvm_get_fpu(cpu);
- }
-
type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
if (ret < 0) {
@@ -4427,10 +4297,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
CPUX86State *env = &cpu->env;
struct kvm_vcpu_events events = {};
- if (!kvm_has_vcpu_events()) {
- return 0;
- }
-
events.flags = 0;
if (has_exception_payload) {
@@ -4498,10 +4364,6 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
struct kvm_vcpu_events events;
int ret;
- if (!kvm_has_vcpu_events()) {
- return 0;
- }
-
memset(&events, 0, sizeof(events));
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
if (ret < 0) {
@@ -4567,47 +4429,12 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
return 0;
}
-static int kvm_guest_debug_workarounds(X86CPU *cpu)
-{
- CPUState *cs = CPU(cpu);
- CPUX86State *env = &cpu->env;
- int ret = 0;
- unsigned long reinject_trap = 0;
-
- if (!kvm_has_vcpu_events()) {
- if (env->exception_nr == EXCP01_DB) {
- reinject_trap = KVM_GUESTDBG_INJECT_DB;
- } else if (env->exception_injected == EXCP03_INT3) {
- reinject_trap = KVM_GUESTDBG_INJECT_BP;
- }
- kvm_reset_exception(env);
- }
-
- /*
- * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
- * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
- * by updating the debug state once again if single-stepping is on.
- * Another reason to call kvm_update_guest_debug here is a pending debug
- * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
- * reinject them via SET_GUEST_DEBUG.
- */
- if (reinject_trap ||
- (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
- ret = kvm_update_guest_debug(cs, reinject_trap);
- }
- return ret;
-}
-
static int kvm_put_debugregs(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
struct kvm_debugregs dbgregs;
int i;
- if (!kvm_has_debugregs()) {
- return 0;
- }
-
memset(&dbgregs, 0, sizeof(dbgregs));
for (i = 0; i < 4; i++) {
dbgregs.db[i] = env->dr[i];
@@ -4625,10 +4452,6 @@ static int kvm_get_debugregs(X86CPU *cpu)
struct kvm_debugregs dbgregs;
int i, ret;
- if (!kvm_has_debugregs()) {
- return 0;
- }
-
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
if (ret < 0) {
return ret;
@@ -4778,11 +4601,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
- /* must be before kvm_put_msrs */
- ret = kvm_inject_mce_oldstyle(x86_cpu);
- if (ret < 0) {
- return ret;
- }
ret = kvm_put_msrs(x86_cpu, level);
if (ret < 0) {
return ret;
@@ -4806,11 +4624,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
- /* must be last */
- ret = kvm_guest_debug_workarounds(x86_cpu);
- if (ret < 0) {
- return ret;
- }
return 0;
}
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index 55d4e68c34..30fedcffea 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -33,7 +33,6 @@
bool kvm_has_smm(void);
bool kvm_enable_x2apic(void);
bool kvm_hv_vpindex_settable(void);
-bool kvm_has_pit_state2(void);
bool kvm_enable_sgx_provisioning(KVMState *s);
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
@@ -50,7 +49,6 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
#ifdef CONFIG_KVM
-bool kvm_has_adjust_clock(void);
bool kvm_has_adjust_clock_stable(void);
bool kvm_has_exception_payload(void);
void kvm_synchronize_all_tsc(void);
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index fb769868f2..7d752bc5e0 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -929,9 +929,8 @@ nvmm_init_vcpu(CPUState *cpu)
error_setg(&nvmm_migration_blocker,
"NVMM: Migration not supported");
- if (migrate_add_blocker(nvmm_migration_blocker, &local_error) < 0) {
+ if (migrate_add_blocker(&nvmm_migration_blocker, &local_error) < 0) {
error_report_err(local_error);
- error_free(nvmm_migration_blocker);
return -EINVAL;
}
}
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 33908c0691..6a465a35fd 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -2527,6 +2527,134 @@ SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd)
SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd)
#endif
+#if SHIFT == 1
+#define SSE_HELPER_SHA1RNDS4(name, F, K) \
+ void name(Reg *d, Reg *a, Reg *b) \
+ { \
+ uint32_t A, B, C, D, E, t, i; \
+ \
+ A = a->L(3); \
+ B = a->L(2); \
+ C = a->L(1); \
+ D = a->L(0); \
+ E = 0; \
+ \
+ for (i = 0; i <= 3; i++) { \
+ t = F(B, C, D) + rol32(A, 5) + b->L(3 - i) + E + K; \
+ E = D; \
+ D = C; \
+ C = rol32(B, 30); \
+ B = A; \
+ A = t; \
+ } \
+ \
+ d->L(3) = A; \
+ d->L(2) = B; \
+ d->L(1) = C; \
+ d->L(0) = D; \
+ }
+
+#define SHA1_F0(b, c, d) (((b) & (c)) ^ (~(b) & (d)))
+#define SHA1_F1(b, c, d) ((b) ^ (c) ^ (d))
+#define SHA1_F2(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
+
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f0, SHA1_F0, 0x5A827999)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f1, SHA1_F1, 0x6ED9EBA1)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f2, SHA1_F2, 0x8F1BBCDC)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f3, SHA1_F1, 0xCA62C1D6)
+
+void helper_sha1nexte(Reg *d, Reg *a, Reg *b)
+{
+ d->L(3) = b->L(3) + rol32(a->L(3), 30);
+ d->L(2) = b->L(2);
+ d->L(1) = b->L(1);
+ d->L(0) = b->L(0);
+}
+
+void helper_sha1msg1(Reg *d, Reg *a, Reg *b)
+{
+ /* These could be overwritten by the first two assignments; save them. */
+ uint32_t b3 = b->L(3);
+ uint32_t b2 = b->L(2);
+
+ d->L(3) = a->L(3) ^ a->L(1);
+ d->L(2) = a->L(2) ^ a->L(0);
+ d->L(1) = a->L(1) ^ b3;
+ d->L(0) = a->L(0) ^ b2;
+}
+
+void helper_sha1msg2(Reg *d, Reg *a, Reg *b)
+{
+ d->L(3) = rol32(a->L(3) ^ b->L(2), 1);
+ d->L(2) = rol32(a->L(2) ^ b->L(1), 1);
+ d->L(1) = rol32(a->L(1) ^ b->L(0), 1);
+ d->L(0) = rol32(a->L(0) ^ d->L(3), 1);
+}
+
+#define SHA256_CH(e, f, g) (((e) & (f)) ^ (~(e) & (g)))
+#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
+
+#define SHA256_RNDS0(w) (ror32((w), 2) ^ ror32((w), 13) ^ ror32((w), 22))
+#define SHA256_RNDS1(w) (ror32((w), 6) ^ ror32((w), 11) ^ ror32((w), 25))
+#define SHA256_MSGS0(w) (ror32((w), 7) ^ ror32((w), 18) ^ ((w) >> 3))
+#define SHA256_MSGS1(w) (ror32((w), 17) ^ ror32((w), 19) ^ ((w) >> 10))
+
+void helper_sha256rnds2(Reg *d, Reg *a, Reg *b, uint32_t wk0, uint32_t wk1)
+{
+ uint32_t t, AA, EE;
+
+ uint32_t A = b->L(3);
+ uint32_t B = b->L(2);
+ uint32_t C = a->L(3);
+ uint32_t D = a->L(2);
+ uint32_t E = b->L(1);
+ uint32_t F = b->L(0);
+ uint32_t G = a->L(1);
+ uint32_t H = a->L(0);
+
+ /* Even round */
+ t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk0 + H;
+ AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
+ EE = t + D;
+
+ /* These will be B and F at the end of the odd round */
+ d->L(2) = AA;
+ d->L(0) = EE;
+
+ D = C, C = B, B = A, A = AA;
+ H = G, G = F, F = E, E = EE;
+
+ /* Odd round */
+ t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk1 + H;
+ AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
+ EE = t + D;
+
+ d->L(3) = AA;
+ d->L(1) = EE;
+}
+
+void helper_sha256msg1(Reg *d, Reg *a, Reg *b)
+{
+ /* b->L(0) could be overwritten by the first assignment; save it. */
+ uint32_t b0 = b->L(0);
+
+ d->L(0) = a->L(0) + SHA256_MSGS0(a->L(1));
+ d->L(1) = a->L(1) + SHA256_MSGS0(a->L(2));
+ d->L(2) = a->L(2) + SHA256_MSGS0(a->L(3));
+ d->L(3) = a->L(3) + SHA256_MSGS0(b0);
+}
+
+void helper_sha256msg2(Reg *d, Reg *a, Reg *b)
+{
+ /* Earlier assignments cannot overwrite either of the two operands. */
+ d->L(0) = a->L(0) + SHA256_MSGS1(b->L(2));
+ d->L(1) = a->L(1) + SHA256_MSGS1(b->L(3));
+ /* Yes, this reuses the previously computed values. */
+ d->L(2) = a->L(2) + SHA256_MSGS1(d->L(0));
+ d->L(3) = a->L(3) + SHA256_MSGS1(d->L(1));
+}
+#endif
+
#undef SSE_HELPER_S
#undef LANE_WIDTH
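For orientation, this is the guest-side code the new helpers emulate, sketched with the standard <immintrin.h> SHA intrinsics (requires -msha; the pairing of intrinsics to helpers is illustrative):

    #include <immintrin.h>

    void sha_ni_demo(__m128i *abcd, __m128i *state1, __m128i msg, __m128i wk)
    {
        *abcd   = _mm_sha1rnds4_epu32(*abcd, msg, 0);        /* helper_sha1rnds4_f0 */
        *abcd   = _mm_sha1nexte_epu32(*abcd, msg);           /* helper_sha1nexte */
        *state1 = _mm_sha256rnds2_epu32(*state1, *abcd, wk); /* helper_sha256rnds2 */
    }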
diff --git a/target/i386/sev.c b/target/i386/sev.c
index fe2144c038..9a71246682 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -891,7 +891,7 @@ sev_launch_finish(SevGuestState *sev)
/* add migration blocker */
error_setg(&sev_mig_blocker,
"SEV: Migration is not implemented");
- migrate_add_blocker(sev_mig_blocker, &error_fatal);
+ migrate_add_blocker(&sev_mig_blocker, &error_fatal);
}
static void
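migrate_add_blocker() now takes the blocker as Error ** (hence the new & at each call site), so on failure it can free the reason and clear the pointer itself; that is why the explicit error_free() in the nvmm hunk above could be dropped. The calling pattern, sketched with a hypothetical blocker:

    static Error *my_blocker;  /* hypothetical */
    Error *local_err = NULL;

    error_setg(&my_blocker, "feature X prevents migration");
    if (migrate_add_blocker(&my_blocker, &local_err) < 0) {
        error_report_err(local_err);  /* my_blocker was freed and cleared */
    }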
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 7d76f15275..2bdbb1bba0 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -23,7 +23,11 @@
* The decoder is mostly based on tables copied from the Intel SDM. As
* a result, most operand load and writeback is done entirely in common
* table-driven code using the same operand type (X86_TYPE_*) and
- * size (X86_SIZE_*) codes used in the manual.
+ * size (X86_SIZE_*) codes used in the manual. There are a few differences
+ * though.
+ *
+ * Vector operands
+ * ---------------
*
* The main difference is that the V, U and W types are extended to
* cover MMX as well; if an instruction is like
@@ -43,6 +47,50 @@
* There are a couple cases in which instructions (e.g. MOVD) write the
* whole XMM or MM register but are established incorrectly in the manual
* as "d" or "q". These have to be fixed for the decoder to work correctly.
+ *
+ * VEX exception classes
+ * ---------------------
+ *
+ * Speaking of imprecisions in the manual, the decoder treats all
+ * exception-class 4 instructions as having an optional VEX prefix, and
+ * all exception-class 6 instructions as having a mandatory VEX prefix.
+ * This is true except for a dozen instructions; these are in exception
+ * class 4 but do not ignore the VEX.W bit (which does not even exist
+ * without a VEX prefix). These instructions are mostly listed in Intel's
+ * table 2-16, but with a few exceptions.
+ *
+ * The AMD manual has more precise subclasses for exceptions, and unlike Intel
+ * they list the VEX.W requirements in the exception classes as well (except
+ * when they don't). AMD describes class 6 as "AVX Mixed Memory Argument"
+ * without defining what a mixed memory argument is, but still uses 4 as the
+ * primary exception class... except when it doesn't.
+ *
+ * The summary is:
+ * Intel AMD VEX.W note
+ * -------------------------------------------------------------------
+ * vpblendd 4 4J 0
+ * vpblendvb 4 4E-X 0 (*)
+ * vpbroadcastq 6 6D 0 (+)
+ * vpermd/vpermps 4 4H 0 (§)
+ * vpermq/vpermpd 4 4H-1 1 (§)
+ * vpermilpd/vpermilps 4 6E 0 (^)
+ * vpmaskmovd 6 4K significant (^)
+ * vpsllv 4 4K significant
+ * vpsrav 4 4J 0
+ * vpsrlv 4 4K significant
+ * vtestps/vtestpd 4 4G 0
+ *
+ * (*) AMD lists VPBLENDVB as related to SSE4.1 PBLENDVB, which may
+ * explain why it is considered exception class 4. However,
+ * Intel says that VEX-only instructions should be in class 6...
+ *
+ * (+) Not found in Intel's table 2-16
+ *
+ * (§) 4H and 4H-1 do not mention VEX.W requirements, which are
+ * however present in the description of the instruction
+ *
+ * (^) these are the two cases in which Intel and AMD disagree on the
+ * primary exception class
*/
#define X86_OP_NONE { 0 },
@@ -90,8 +138,6 @@
X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
#define cpuid(feat) .cpuid = X86_FEAT_##feat,
-#define i64 .special = X86_SPECIAL_i64,
-#define o64 .special = X86_SPECIAL_o64,
#define xchg .special = X86_SPECIAL_Locked,
#define mmx .special = X86_SPECIAL_MMX,
#define zext0 .special = X86_SPECIAL_ZExtOp0,
@@ -114,6 +160,9 @@
#define vex12 .vex_class = 12,
#define vex13 .vex_class = 13,
+#define chk(a) .check = X86_CHECK_##a,
+#define svm(a) .intercept = SVM_EXIT_##a,
+
#define avx2_256 .vex_special = X86_VEX_AVX2_256,
#define P_00 1
@@ -161,8 +210,8 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
};
static const X86OpEntry group15_mem[8] = {
- [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
- [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
+ [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
+ [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
};
uint8_t modrm = get_modrm(s, env);
@@ -337,11 +386,11 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0x07] = X86_OP_ENTRY3(PHSUBSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
[0x10] = X86_OP_ENTRY2(PBLENDVB, V,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
- [0x13] = X86_OP_ENTRY2(VCVTPH2PS, V,x, W,xh, vex11 cpuid(F16C) p_66),
+ [0x13] = X86_OP_ENTRY2(VCVTPH2PS, V,x, W,xh, vex11 chk(W0) cpuid(F16C) p_66),
[0x14] = X86_OP_ENTRY2(BLENDVPS, V,x, W,x, vex4 cpuid(SSE41) p_66),
[0x15] = X86_OP_ENTRY2(BLENDVPD, V,x, W,x, vex4 cpuid(SSE41) p_66),
/* Listed incorrectly as type 4 */
- [0x16] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
+ [0x16] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), /* vpermps */
[0x17] = X86_OP_ENTRY3(VPTEST, None,None, V,x, W,x, vex4 cpuid(SSE41) p_66),
/*
@@ -362,14 +411,14 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0x33] = X86_OP_ENTRY3(VPMOVZXWD, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66),
[0x34] = X86_OP_ENTRY3(VPMOVZXWQ, V,x, None,None, W,d, vex5 cpuid(SSE41) avx_movx avx2_256 p_66),
[0x35] = X86_OP_ENTRY3(VPMOVZXDQ, V,x, None,None, W,q, vex5 cpuid(SSE41) avx_movx avx2_256 p_66),
- [0x36] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
+ [0x36] = X86_OP_ENTRY3(VPERMD, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66),
[0x37] = X86_OP_ENTRY3(PCMPGTQ, V,x, H,x, W,x, vex4 cpuid(SSE42) avx2_256 p_66),
[0x40] = X86_OP_ENTRY3(PMULLD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
[0x41] = X86_OP_ENTRY3(VPHMINPOSUW, V,dq, None,None, W,dq, vex4 cpuid(SSE41) p_66),
/* Listed incorrectly as type 4 */
[0x45] = X86_OP_ENTRY3(VPSRLV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66),
- [0x46] = X86_OP_ENTRY3(VPSRAV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66),
+ [0x46] = X86_OP_ENTRY3(VPSRAV, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX2) p_66),
[0x47] = X86_OP_ENTRY3(VPSLLV, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66),
[0x90] = X86_OP_ENTRY3(VPGATHERD, V,x, H,x, M,d, vex12 cpuid(AVX2) p_66), /* vpgatherdd/q */
@@ -391,14 +440,15 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0x09] = X86_OP_ENTRY3(PSIGNW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
[0x0a] = X86_OP_ENTRY3(PSIGND, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
[0x0b] = X86_OP_ENTRY3(PMULHRSW, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
- [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex4 cpuid(AVX) p_00_66),
- [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex4 cpuid(AVX) p_66),
- [0x0e] = X86_OP_ENTRY3(VTESTPS, None,None, V,x, W,x, vex4 cpuid(AVX) p_66),
- [0x0f] = X86_OP_ENTRY3(VTESTPD, None,None, V,x, W,x, vex4 cpuid(AVX) p_66),
-
- [0x18] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 cpuid(AVX) p_66), /* vbroadcastss */
- [0x19] = X86_OP_ENTRY3(VPBROADCASTQ, V,qq, None,None, W,q, vex6 cpuid(AVX) p_66), /* vbroadcastsd */
- [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 cpuid(AVX) p_66),
+ /* Listed incorrectly as type 4 */
+ [0x0c] = X86_OP_ENTRY3(VPERMILPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_00_66),
+ [0x0d] = X86_OP_ENTRY3(VPERMILPD, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x0e] = X86_OP_ENTRY3(VTESTPS, None,None, V,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x0f] = X86_OP_ENTRY3(VTESTPD, None,None, V,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+
+ [0x18] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 chk(W0) cpuid(AVX) p_66), /* vbroadcastss */
+ [0x19] = X86_OP_ENTRY3(VPBROADCASTQ, V,qq, None,None, W,q, vex6 chk(W0) cpuid(AVX) p_66), /* vbroadcastsd */
+ [0x1a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 chk(W0) cpuid(AVX) p_66),
[0x1c] = X86_OP_ENTRY3(PABSB, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
[0x1d] = X86_OP_ENTRY3(PABSW, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
[0x1e] = X86_OP_ENTRY3(PABSD, V,x, None,None, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
@@ -407,11 +457,11 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0x29] = X86_OP_ENTRY3(PCMPEQQ, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
[0x2a] = X86_OP_ENTRY3(MOVDQ, V,x, None,None, WM,x, vex1 cpuid(SSE41) avx2_256 p_66), /* movntdqa */
[0x2b] = X86_OP_ENTRY3(VPACKUSDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
- [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66),
- [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 cpuid(AVX) p_66),
+ [0x2c] = X86_OP_ENTRY3(VMASKMOVPS, V,x, H,x, WM,x, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x2d] = X86_OP_ENTRY3(VMASKMOVPD, V,x, H,x, WM,x, vex6 chk(W0) cpuid(AVX) p_66),
/* Incorrectly listed as Mx,Hx,Vx in the manual */
- [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66),
- [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 cpuid(AVX) p_66),
+ [0x2e] = X86_OP_ENTRY3(VMASKMOVPS_st, M,x, V,x, H,x, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x2f] = X86_OP_ENTRY3(VMASKMOVPD_st, M,x, V,x, H,x, vex6 chk(W0) cpuid(AVX) p_66),
[0x38] = X86_OP_ENTRY3(PMINSB, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
[0x39] = X86_OP_ENTRY3(PMINSD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
@@ -422,12 +472,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0x3e] = X86_OP_ENTRY3(PMAXUW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
[0x3f] = X86_OP_ENTRY3(PMAXUD, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
- [0x58] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 cpuid(AVX2) p_66),
- [0x59] = X86_OP_ENTRY3(VPBROADCASTQ, V,x, None,None, W,q, vex6 cpuid(AVX2) p_66),
- [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 cpuid(AVX2) p_66),
+ /* VPBROADCASTQ not listed as W0 in table 2-16 */
+ [0x58] = X86_OP_ENTRY3(VPBROADCASTD, V,x, None,None, W,d, vex6 chk(W0) cpuid(AVX2) p_66),
+ [0x59] = X86_OP_ENTRY3(VPBROADCASTQ, V,x, None,None, W,q, vex6 chk(W0) cpuid(AVX2) p_66),
+ [0x5a] = X86_OP_ENTRY3(VBROADCASTx128, V,qq, None,None, WM,dq,vex6 chk(W0) cpuid(AVX2) p_66),
- [0x78] = X86_OP_ENTRY3(VPBROADCASTB, V,x, None,None, W,b, vex6 cpuid(AVX2) p_66),
- [0x79] = X86_OP_ENTRY3(VPBROADCASTW, V,x, None,None, W,w, vex6 cpuid(AVX2) p_66),
+ [0x78] = X86_OP_ENTRY3(VPBROADCASTB, V,x, None,None, W,b, vex6 chk(W0) cpuid(AVX2) p_66),
+ [0x79] = X86_OP_ENTRY3(VPBROADCASTW, V,x, None,None, W,w, vex6 chk(W0) cpuid(AVX2) p_66),
[0x8c] = X86_OP_ENTRY3(VPMASKMOV, V,x, H,x, WM,x, vex6 cpuid(AVX2) p_66),
[0x8e] = X86_OP_ENTRY3(VPMASKMOV_st, M,x, V,x, H,x, vex6 cpuid(AVX2) p_66),
@@ -460,6 +511,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0xbe] = X86_OP_ENTRY3(VFNMSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66),
[0xbf] = X86_OP_ENTRY3(VFNMSUB231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66),
+ [0xc8] = X86_OP_ENTRY2(SHA1NEXTE, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xc9] = X86_OP_ENTRY2(SHA1MSG1, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xca] = X86_OP_ENTRY2(SHA1MSG2, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xcb] = X86_OP_ENTRY2(SHA256RNDS2, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xcc] = X86_OP_ENTRY2(SHA256MSG1, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xcd] = X86_OP_ENTRY2(SHA256MSG2, V,dq, W,dq, cpuid(SHA_NI)),
+
[0xdb] = X86_OP_ENTRY3(VAESIMC, V,dq, None,None, W,dq, vex4 cpuid(AES) p_66),
[0xdc] = X86_OP_ENTRY3(VAESENC, V,x, H,x, W,x, vex4 cpuid(AES) p_66),
[0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66),
@@ -554,18 +612,18 @@ static const X86OpEntry opcodes_0F3A[256] = {
* Also the "qq" instructions are sometimes omitted by Table 2-17, but are VEX256
* only.
*/
- [0x00] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 cpuid(AVX2) p_66),
- [0x01] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 cpuid(AVX2) p_66), /* VPERMPD */
- [0x02] = X86_OP_ENTRY4(VBLENDPS, V,x, H,x, W,x, vex6 cpuid(AVX2) p_66), /* VPBLENDD */
- [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x, W,x, I,b, vex6 cpuid(AVX) p_66),
- [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 cpuid(AVX) p_66),
- [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66),
+ [0x00] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 chk(W1) cpuid(AVX2) p_66),
+ [0x01] = X86_OP_ENTRY3(VPERMQ, V,qq, W,qq, I,b, vex6 chk(W1) cpuid(AVX2) p_66), /* VPERMPD */
+ [0x02] = X86_OP_ENTRY4(VBLENDPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX2) p_66), /* VPBLENDD */
+ [0x04] = X86_OP_ENTRY3(VPERMILPS_i, V,x, W,x, I,b, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66),
[0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66),
[0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66),
[0x16] = X86_OP_ENTRY3(PEXTR, E,y, V,dq, I,b, vex5 cpuid(SSE41) p_66),
[0x17] = X86_OP_ENTRY3(VEXTRACTPS, E,d, V,dq, I,b, vex5 cpuid(SSE41) p_66),
- [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,xh, V,x, I,b, vex11 cpuid(F16C) p_66),
+ [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,xh, V,x, I,b, vex11 chk(W0) cpuid(F16C) p_66),
[0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) zext2 p_66),
[0x21] = X86_OP_GROUP0(VINSERTPS),
@@ -575,7 +633,7 @@ static const X86OpEntry opcodes_0F3A[256] = {
[0x41] = X86_OP_ENTRY4(VDDPD, V,dq, H,dq, W,dq, vex2 cpuid(SSE41) p_66),
[0x42] = X86_OP_ENTRY4(VMPSADBW, V,x, H,x, W,x, vex2 cpuid(SSE41) avx2_256 p_66),
[0x44] = X86_OP_ENTRY4(PCLMULQDQ, V,dq, H,dq, W,dq, vex4 cpuid(PCLMULQDQ) p_66),
- [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
+ [0x46] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66),
[0x60] = X86_OP_ENTRY4(PCMPESTRM, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66),
[0x61] = X86_OP_ENTRY4(PCMPESTRI, None,None, V,dq, W,dq, vex4_unal cpuid(SSE42) p_66),
@@ -598,16 +656,18 @@ static const X86OpEntry opcodes_0F3A[256] = {
[0x0e] = X86_OP_ENTRY4(VPBLENDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66),
[0x0f] = X86_OP_ENTRY4(PALIGNR, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66),
- [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 cpuid(AVX) p_66),
- [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 cpuid(AVX) p_66),
+ [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX) p_66),
- [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 cpuid(AVX2) p_66),
- [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 cpuid(AVX2) p_66),
+ [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66),
+ [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX2) p_66),
/* Listed incorrectly as type 4 */
- [0x4a] = X86_OP_ENTRY4(VBLENDVPS, V,x, H,x, W,x, vex6 cpuid(AVX) p_66),
- [0x4b] = X86_OP_ENTRY4(VBLENDVPD, V,x, H,x, W,x, vex6 cpuid(AVX) p_66),
- [0x4c] = X86_OP_ENTRY4(VPBLENDVB, V,x, H,x, W,x, vex6 cpuid(AVX) p_66 avx2_256),
+ [0x4a] = X86_OP_ENTRY4(VBLENDVPS, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x4b] = X86_OP_ENTRY4(VBLENDVPD, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
+ [0x4c] = X86_OP_ENTRY4(VPBLENDVB, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66 avx2_256),
+
+ [0xcc] = X86_OP_ENTRY3(SHA1RNDS4, V,dq, W,dq, I,b, cpuid(SHA_NI)),
[0xdf] = X86_OP_ENTRY3(VAESKEYGEN, V,dq, W,dq, I,b, vex4 cpuid(AES) p_66),
@@ -1456,6 +1516,8 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2);
case X86_FEAT_AVX2:
return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2);
+ case X86_FEAT_SHA_NI:
+ return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SHA_NI);
}
g_assert_not_reached();
}
@@ -1493,8 +1555,6 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
}
}
- /* TODO: instructions that require VEX.W=0 (Table 2-16) */
-
switch (e->vex_class) {
case 0:
if (s->prefix & PREFIX_VEX) {
@@ -1579,6 +1639,24 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
if (s->flags & HF_EM_MASK) {
goto illegal;
}
+
+ if (e->check) {
+ if (e->check & X86_CHECK_VEX128) {
+ if (s->vex_l) {
+ goto illegal;
+ }
+ }
+ if (e->check & X86_CHECK_W0) {
+ if (s->vex_w) {
+ goto illegal;
+ }
+ }
+ if (e->check & X86_CHECK_W1) {
+ if (!s->vex_w) {
+ goto illegal;
+ }
+ }
+ }
return true;
nm_exception:
@@ -1764,6 +1842,25 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
goto illegal_op;
}
+ /* Checks that result in #UD come first. */
+ if (decode.e.check) {
+ if (decode.e.check & X86_CHECK_i64) {
+ if (CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_o64) {
+ if (!CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_prot) {
+ if (!PE(s) || VM86(s)) {
+ goto illegal_op;
+ }
+ }
+ }
+
switch (decode.e.special) {
case X86_SPECIAL_None:
break;
@@ -1774,23 +1871,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
}
break;
- case X86_SPECIAL_ProtMode:
- if (!PE(s) || VM86(s)) {
- goto illegal_op;
- }
- break;
-
- case X86_SPECIAL_i64:
- if (CODE64(s)) {
- goto illegal_op;
- }
- break;
- case X86_SPECIAL_o64:
- if (!CODE64(s)) {
- goto illegal_op;
- }
- break;
-
case X86_SPECIAL_ZExtOp0:
assert(decode.op[0].unit == X86_OP_INT);
if (!decode.op[0].has_ea) {
@@ -1820,6 +1900,37 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
if (!validate_vex(s, &decode)) {
return;
}
+
+ /*
+ * Checks that result in #GP or VMEXIT come second. Intercepts are
+ * generally checked after non-memory exceptions (i.e. before all
+ * exceptions if there is no memory operand). Exceptions are
+ * vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!).
+ *
+ * RSM and XSETBV will be handled in the gen_* functions
+ * instead of using chk().
+ */
+ if (decode.e.check & X86_CHECK_cpl0) {
+ if (CPL(s) != 0) {
+ goto gp_fault;
+ }
+ }
+ if (decode.e.intercept && unlikely(GUEST(s))) {
+ gen_helper_svm_check_intercept(tcg_env,
+ tcg_constant_i32(decode.e.intercept));
+ }
+ if (decode.e.check) {
+ if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) {
+ if (IOPL(s) < 3) {
+ goto gp_fault;
+ }
+ } else if (decode.e.check & X86_CHECK_cpl_iopl) {
+ if (IOPL(s) < CPL(s)) {
+ goto gp_fault;
+ }
+ }
+ }
+
if (decode.e.special == X86_SPECIAL_MMX &&
!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
gen_helper_enter_mmx(tcg_env);
@@ -1846,6 +1957,9 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
gen_writeback(s, &decode, 0, s->T0);
}
return;
+ gp_fault:
+ gen_exception_gpf(s);
+ return;
illegal_op:
gen_illegal_opcode(s);
return;
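
The two disas_insn_new() hunks above establish a fixed two-phase ordering:
#UD-class conditions (i64/o64/prot) are tested right after decode, while
#GP and intercept conditions are only tested after validate_vex(). A
minimal sketch of the first phase, with an invented helper name (the real
code open-codes these tests inline):

/*
 * Illustrative only: the #UD-phase tests that replace the old
 * X86_SPECIAL_i64/o64/ProtMode cases.  Several conditions can now be
 * requested at once because "check" is a bitmask.
 */
static bool ud_checks_pass(uint16_t check, bool code64, bool pe, bool vm86)
{
    if ((check & X86_CHECK_i64) && code64) {
        return false;   /* invalid in 64-bit mode */
    }
    if ((check & X86_CHECK_o64) && !code64) {
        return false;   /* only valid in 64-bit mode */
    }
    if ((check & X86_CHECK_prot) && (!pe || vm86)) {
        return false;   /* requires protected mode, not vm86 */
    }
    return true;
}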
diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h
index a542ec1681..e6c904a319 100644
--- a/target/i386/tcg/decode-new.h
+++ b/target/i386/tcg/decode-new.h
@@ -108,6 +108,7 @@ typedef enum X86CPUIDFeature {
X86_FEAT_FMA,
X86_FEAT_MOVBE,
X86_FEAT_PCLMULQDQ,
+ X86_FEAT_SHA_NI,
X86_FEAT_SSE,
X86_FEAT_SSE2,
X86_FEAT_SSE3,
@@ -130,15 +131,36 @@ typedef enum X86OpUnit {
X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;
+typedef enum X86InsnCheck {
+ /* Illegal or exclusive to 64-bit mode */
+ X86_CHECK_i64 = 1,
+ X86_CHECK_o64 = 2,
+
+ /* Fault outside protected mode */
+ X86_CHECK_prot = 4,
+
+ /* Privileged instruction checks */
+ X86_CHECK_cpl0 = 8,
+ X86_CHECK_vm86_iopl = 16,
+ X86_CHECK_cpl_iopl = 32,
+ X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,
+
+ /* Fault if VEX.L=1 */
+ X86_CHECK_VEX128 = 64,
+
+ /* Fault if VEX.W=1 */
+ X86_CHECK_W0 = 128,
+
+ /* Fault if VEX.W=0 */
+ X86_CHECK_W1 = 256,
+} X86InsnCheck;
+
typedef enum X86InsnSpecial {
X86_SPECIAL_None,
/* Always locked if it has a memory operand (XCHG) */
X86_SPECIAL_Locked,
- /* Fault outside protected mode */
- X86_SPECIAL_ProtMode,
-
/*
* Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw
* in the manual.
@@ -157,10 +179,6 @@ typedef enum X86InsnSpecial {
* become P/P/Q/N, and size "x" becomes "q".
*/
X86_SPECIAL_MMX,
-
- /* Illegal or exclusive to 64-bit mode */
- X86_SPECIAL_i64,
- X86_SPECIAL_o64,
} X86InsnSpecial;
/*
@@ -223,7 +241,9 @@ struct X86OpEntry {
X86CPUIDFeature cpuid:8;
unsigned vex_class:8;
X86VEXSpecial vex_special:8;
- uint16_t valid_prefix:16;
+ unsigned valid_prefix:16;
+ unsigned check:16;
+ unsigned intercept:8;
bool is_decode:1;
};
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 88793ba988..82da5488d4 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1236,10 +1236,6 @@ static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec
static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- if (s->vex_l) {
- gen_illegal_opcode(s);
- return;
- }
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
}
@@ -1800,6 +1796,60 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
tcg_gen_sar_tl(s->T0, s->T0, s->T1);
}
+static void gen_SHA1NEXTE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha1msg1(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha1msg2(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1RNDS4(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    switch (decode->immediate & 3) {
+ case 0:
+ gen_helper_sha1rnds4_f0(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ case 1:
+ gen_helper_sha1rnds4_f1(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ case 2:
+ gen_helper_sha1rnds4_f2(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ case 3:
+ gen_helper_sha1rnds4_f3(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ }
+}
+
+static void gen_SHA256MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha256msg1(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA256MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha256msg2(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA256RNDS2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ TCGv_i32 wk0 = tcg_temp_new_i32();
+ TCGv_i32 wk1 = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(wk0, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_ld_i32(wk1, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(1)));
+
+ gen_helper_sha256rnds2(OP_PTR0, OP_PTR1, OP_PTR2, wk0, wk1);
+}
+
static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
MemOp ot = decode->op[0].ot;
@@ -1832,10 +1882,6 @@ static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *de
static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- if (s->vex_l) {
- gen_illegal_opcode(s);
- return;
- }
gen_helper_update_mxcsr(tcg_env);
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
}
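
gen_SHA1RNDS4() above dispatches on the instruction's two-bit immediate,
which selects one of the four SHA-1 round functions of FIPS 180-4 (the
helpers also apply the matching round constant). For reference, a sketch
of the selected logical functions; this is standard SHA-1, not code from
the patch:

/* The SHA-1 round function picked by SHA1RNDS4's imm2 (FIPS 180-4). */
static inline uint32_t sha1_f(int imm2, uint32_t b, uint32_t c, uint32_t d)
{
    switch (imm2 & 3) {
    case 0:
        return (b & c) | (~b & d);           /* Ch,     K = 0x5a827999 */
    case 1:
        return b ^ c ^ d;                    /* Parity, K = 0x6ed9eba1 */
    case 2:
        return (b & c) | (b & d) | (c & d);  /* Maj,    K = 0x8f1bbcdc */
    default:
        return b ^ c ^ d;                    /* Parity, K = 0xca62c1d6 */
    }
}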
diff --git a/target/i386/tcg/ops_sse_header.h.inc b/target/i386/tcg/ops_sse_header.h.inc
index 8a7b2f4e2f..d92c6faf6d 100644
--- a/target/i386/tcg/ops_sse_header.h.inc
+++ b/target/i386/tcg/ops_sse_header.h.inc
@@ -399,6 +399,20 @@ DEF_HELPER_3(vpermq_ymm, void, Reg, Reg, i32)
#endif
#endif
+/* SHA helpers */
+#if SHIFT == 1
+DEF_HELPER_3(sha1rnds4_f0, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f2, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f3, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1nexte, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1msg1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1msg2, void, Reg, Reg, Reg)
+DEF_HELPER_5(sha256rnds2, void, Reg, Reg, Reg, i32, i32)
+DEF_HELPER_3(sha256msg1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha256msg2, void, Reg, Reg, Reg)
+#endif
+
#undef SHIFT
#undef Reg
#undef SUFFIX
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 4f6f9fa7e5..587d88692a 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -701,33 +701,11 @@ static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
- switch (size) {
- case MO_8:
- if (sign) {
- tcg_gen_ext8s_tl(dst, src);
- } else {
- tcg_gen_ext8u_tl(dst, src);
- }
- return dst;
- case MO_16:
- if (sign) {
- tcg_gen_ext16s_tl(dst, src);
- } else {
- tcg_gen_ext16u_tl(dst, src);
- }
- return dst;
-#ifdef TARGET_X86_64
- case MO_32:
- if (sign) {
- tcg_gen_ext32s_tl(dst, src);
- } else {
- tcg_gen_ext32u_tl(dst, src);
- }
- return dst;
-#endif
- default:
+ if (size == MO_TL) {
return src;
}
+ tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
+ return dst;
}
static void gen_extu(MemOp ot, TCGv reg)
@@ -2918,59 +2896,54 @@ static inline void gen_stq_env_A0(DisasContext *s, int offset)
static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
+ MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
+ ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
+ MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
int mem_index = s->mem_index;
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
- MO_LEUQ | (align ? MO_ALIGN_16 : 0));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1)));
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
+ tcg_gen_st_i128(t, tcg_env, offset);
}
static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
+ MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
+ ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
+ MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
int mem_index = s->mem_index;
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
- MO_LEUQ | (align ? MO_ALIGN_16 : 0));
- tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ tcg_gen_ld_i128(t, tcg_env, offset);
+ tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
}
static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
+ MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
int mem_index = s->mem_index;
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
- MO_LEUQ | (align ? MO_ALIGN_32 : 0));
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0)));
- tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1)));
+ TCGv_i128 t0 = tcg_temp_new_i128();
+ TCGv_i128 t1 = tcg_temp_new_i128();
+ tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
tcg_gen_addi_tl(s->tmp0, s->A0, 16);
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2)));
- tcg_gen_addi_tl(s->tmp0, s->A0, 24);
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3)));
+ tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
+
+ tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
+ tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
}
static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
+ MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
int mem_index = s->mem_index;
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
- MO_LEUQ | (align ? MO_ALIGN_32 : 0));
- tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
+ tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
tcg_gen_addi_tl(s->tmp0, s->A0, 16);
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
- tcg_gen_addi_tl(s->tmp0, s->A0, 24);
- tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
+ tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
+ tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
}
#include "decode-new.h"
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index df3aba2642..d29ba916a0 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -2160,9 +2160,8 @@ int whpx_init_vcpu(CPUState *cpu)
"State blocked due to non-migratable CPUID feature support,"
"dirty memory tracking support, and XSAVE/XRSTOR support");
- if (migrate_add_blocker(whpx_migration_blocker, &local_error) < 0) {
+ if (migrate_add_blocker(&whpx_migration_blocker, &local_error) < 0) {
error_report_err(local_error);
- error_free(whpx_migration_blocker);
ret = -EINVAL;
goto error;
}
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 4d0110de95..4a0b0b2703 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -520,21 +520,9 @@ static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
switch (opsize) {
case OS_BYTE:
- if (sign) {
- tcg_gen_ext8s_i32(res, val);
- } else {
- tcg_gen_ext8u_i32(res, val);
- }
- break;
case OS_WORD:
- if (sign) {
- tcg_gen_ext16s_i32(res, val);
- } else {
- tcg_gen_ext16u_i32(res, val);
- }
- break;
case OS_LONG:
- tcg_gen_mov_i32(res, val);
+ tcg_gen_ext_i32(res, val, opsize | (sign ? MO_SIGN : 0));
break;
default:
g_assert_not_reached();
@@ -1072,15 +1060,10 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
tmp = tcg_temp_new();
switch (opsize) {
case OS_BYTE:
- tcg_gen_ext8s_i32(tmp, reg);
- gen_helper_exts32(tcg_env, fp, tmp);
- break;
case OS_WORD:
- tcg_gen_ext16s_i32(tmp, reg);
- gen_helper_exts32(tcg_env, fp, tmp);
- break;
case OS_LONG:
- gen_helper_exts32(tcg_env, fp, reg);
+ tcg_gen_ext_i32(tmp, reg, opsize | MO_SIGN);
+ gen_helper_exts32(tcg_env, fp, tmp);
break;
case OS_SINGLE:
gen_helper_extf32(tcg_env, fp, reg);
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index 67f8e8b988..5fddceff3a 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -1345,11 +1345,10 @@ uint64_t cpu_mips_phys_to_kseg1(void *opaque, uint64_t addr);
#if !defined(CONFIG_USER_ONLY)
-/* mips_int.c */
+/* HW declaration specific to the MIPS target */
void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
-
-/* mips_itu.c */
-void itc_reconfigure(struct MIPSITUState *tag);
+void cpu_mips_irq_init_cpu(MIPSCPU *cpu);
+void cpu_mips_clock_init(MIPSCPU *cpu);
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/mips/sysemu/cp0_timer.c b/target/mips/sysemu/cp0_timer.c
index 9d2bcb0dea..62de502caa 100644
--- a/target/mips/sysemu/cp0_timer.c
+++ b/target/mips/sysemu/cp0_timer.c
@@ -22,7 +22,6 @@
#include "qemu/osdep.h"
#include "hw/irq.h"
-#include "hw/mips/cpudevs.h"
#include "qemu/timer.h"
#include "sysemu/kvm.h"
#include "internal.h"
diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c
index 5da1124589..d349548743 100644
--- a/target/mips/tcg/sysemu/cp0_helper.c
+++ b/target/mips/tcg/sysemu/cp0_helper.c
@@ -28,6 +28,7 @@
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
+#include "hw/misc/mips_itu.h"
/* SMP helpers. */
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
index 7dbc2e24c4..4ede904800 100644
--- a/target/mips/tcg/sysemu/tlb_helper.c
+++ b/target/mips/tcg/sysemu/tlb_helper.c
@@ -24,7 +24,6 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
-#include "hw/mips/cpudevs.h"
#include "exec/helper-proto.h"
/* TLB management */
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 090d617627..26e68c7ab4 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -1420,7 +1420,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
exit(1);
}
- kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+ kvm_msi_via_irqfd_allowed = true;
}
static void kvm_cpu_instance_init(CPUState *cs)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index f8860830ae..c6ce717a95 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -492,13 +492,11 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
- static void (* const mov[])(TCGv ret, TCGv arg) = {
- tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
- };
TCGv tmp, mem, addr;
+
if (a->lds == 3 && a->ldd == 3) {
/* mov.<bwl> rs,rd */
- mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
+ tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz | MO_SIGN);
return true;
}
@@ -570,10 +568,7 @@ static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
/* movu.<bw> rs,rd */
static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
{
- static void (* const ext[])(TCGv ret, TCGv arg) = {
- tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
- };
- ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
+ tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz);
return true;
}
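
The m68k and rx conversions above fold per-size helper tables into a
single tcg_gen_ext_i32() call; this works because the targets' size codes
(0/1/2 for byte/word/long) coincide with MO_8/MO_16/MO_32, and OR-ing in
MO_SIGN selects sign over zero extension. A sketch with an invented
wrapper name:

/*
 * Sketch, not from the patch: sz must be 0 (byte), 1 (word) or
 * 2 (long); the call degenerates to a plain move for the full width.
 */
static void gen_ext_example(TCGv_i32 dst, TCGv_i32 src, int sz, bool sign)
{
    tcg_gen_ext_i32(dst, src, (MemOp)sz | (sign ? MO_SIGN : 0));
}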
diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
index 8112561e5e..1cd30c1d84 100644
--- a/target/s390x/cpu-sysemu.c
+++ b/target/s390x/cpu-sysemu.c
@@ -307,3 +307,16 @@ void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg)
kvm_s390_set_diag318(cs, arg.host_ulong);
}
}
+
+void s390_cpu_topology_set_changed(bool changed)
+{
+ int ret;
+
+ if (kvm_enabled()) {
+ ret = kvm_s390_topology_set_mtcr(changed);
+ if (ret) {
+ error_report("Failed to set Modified Topology Change Report: %s",
+ strerror(-ret));
+ }
+ }
+}
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 6093ab0a12..6acfa1c91b 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -31,12 +31,14 @@
#include "qapi/qapi-types-machine.h"
#include "sysemu/hw_accel.h"
#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
#include "fpu/softfloat-helpers.h"
#include "disas/capstone.h"
#include "sysemu/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/reset.h"
#endif
+#include "hw/s390x/cpu-topology.h"
#define CR0_RESET 0xE0UL
#define CR14_RESET 0xC2000000UL;
@@ -145,6 +147,14 @@ static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value)
S390CPU *s390_cpu = S390_CPU(cpu);
value->u.s390x.cpu_state = s390_cpu->env.cpu_state;
+#if !defined(CONFIG_USER_ONLY)
+ if (s390_has_topology()) {
+ value->u.s390x.has_dedicated = true;
+ value->u.s390x.dedicated = s390_cpu->env.dedicated;
+ value->u.s390x.has_entitlement = true;
+ value->u.s390x.entitlement = s390_cpu->env.entitlement;
+ }
+#endif
}
/* S390CPUClass::reset() */
@@ -290,6 +300,12 @@ static const gchar *s390_gdb_arch_name(CPUState *cs)
static Property s390x_cpu_properties[] = {
#if !defined(CONFIG_USER_ONLY)
DEFINE_PROP_UINT32("core-id", S390CPU, env.core_id, 0),
+ DEFINE_PROP_INT32("socket-id", S390CPU, env.socket_id, -1),
+ DEFINE_PROP_INT32("book-id", S390CPU, env.book_id, -1),
+ DEFINE_PROP_INT32("drawer-id", S390CPU, env.drawer_id, -1),
+ DEFINE_PROP_BOOL("dedicated", S390CPU, env.dedicated, false),
+ DEFINE_PROP_CPUS390ENTITLEMENT("entitlement", S390CPU, env.entitlement,
+ S390_CPU_ENTITLEMENT_AUTO),
#endif
DEFINE_PROP_END_OF_LIST()
};
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 7bea7075e1..40c5cedd0e 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -30,6 +30,7 @@
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "tcg/tcg_s390x.h"
+#include "qapi/qapi-types-machine-common.h"
#define ELF_MACHINE_UNAME "S390X"
@@ -132,6 +133,11 @@ struct CPUArchState {
#if !defined(CONFIG_USER_ONLY)
uint32_t core_id; /* PoP "CPU address", same as cpu_index */
+ int32_t socket_id;
+ int32_t book_id;
+ int32_t drawer_id;
+ bool dedicated;
+ CpuS390Entitlement entitlement; /* Used only for vertical polarization */
uint64_t cpuid;
#endif
@@ -564,6 +570,29 @@ typedef struct SysIB_322 {
} SysIB_322;
QEMU_BUILD_BUG_ON(sizeof(SysIB_322) != 4096);
+/*
+ * The Topology Magnitude (MAG) fields indicate the maximum number of
+ * topology list entries (TLEs) at the corresponding nesting level.
+ */
+#define S390_TOPOLOGY_MAG 6
+#define S390_TOPOLOGY_MAG6 0
+#define S390_TOPOLOGY_MAG5 1
+#define S390_TOPOLOGY_MAG4 2
+#define S390_TOPOLOGY_MAG3 3
+#define S390_TOPOLOGY_MAG2 4
+#define S390_TOPOLOGY_MAG1 5
+/* Configuration topology */
+typedef struct SysIB_151x {
+ uint8_t reserved0[2];
+ uint16_t length;
+ uint8_t mag[S390_TOPOLOGY_MAG];
+ uint8_t reserved1;
+ uint8_t mnest;
+ uint32_t reserved2;
+ char tle[];
+} SysIB_151x;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_151x) != 16);
+
typedef union SysIB {
SysIB_111 sysib_111;
SysIB_121 sysib_121;
@@ -571,9 +600,62 @@ typedef union SysIB {
SysIB_221 sysib_221;
SysIB_222 sysib_222;
SysIB_322 sysib_322;
+ SysIB_151x sysib_151x;
} SysIB;
QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
+/*
+ * The CPU Topology List provided by STSI with fc=15 is a list of
+ * two different types of Topology List Entries (TLEs) that specify
+ * the topology hierarchy:
+ *
+ * - Container Topology List Entry
+ *   Defines a container holding other Topology List Entries of
+ *   any type, either nested containers or CPU TLEs.
+ * - CPU Topology List Entry
+ *   Specifies the position, type, entitlement and polarization of
+ *   the CPUs contained in the preceding container TLE.
+ *
+ * In theory there can be up to five levels of containers; QEMU uses
+ * only three levels: drawer, book and socket.
+ *
+ * A container with a nesting level (NL) greater than 1 can only
+ * contain containers of nesting level NL-1.
+ *
+ * A container of nesting level 1 (socket) contains as many CPU TLEs
+ * as needed to describe the position and qualities of all CPUs inside
+ * the container.  The qualities of a CPU are polarization, entitlement
+ * and type.
+ *
+ * A CPU TLE defines the position of CPUs with identical qualities
+ * using a 64-bit mask whose first bit's offset is given by the CPU
+ * address origin field of the TLE:
+ *   CPU address = origin * 64 + bit position within the mask
+ */
+/* Container type Topology List Entry */
+typedef struct SYSIBContainerListEntry {
+ uint8_t nl;
+ uint8_t reserved[6];
+ uint8_t id;
+} SYSIBContainerListEntry;
+QEMU_BUILD_BUG_ON(sizeof(SYSIBContainerListEntry) != 8);
+
+/* CPU type Topology List Entry */
+typedef struct SysIBCPUListEntry {
+ uint8_t nl;
+ uint8_t reserved0[3];
+#define SYSIB_TLE_POLARITY_MASK 0x03
+#define SYSIB_TLE_DEDICATED 0x04
+ uint8_t flags;
+ uint8_t type;
+ uint16_t origin;
+ uint64_t mask;
+} SysIBCPUListEntry;
+QEMU_BUILD_BUG_ON(sizeof(SysIBCPUListEntry) != 16);
+
+void insert_stsi_15_1_x(S390CPU *cpu, int sel2, uint64_t addr, uint8_t ar, uintptr_t ra);
+void s390_cpu_topology_set_changed(bool changed);
+
/* MMU defines */
#define ASCE_ORIGIN (~0xfffULL) /* segment table origin */
#define ASCE_SUBSPACE 0x200 /* subspace group control */
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 98f14c09c2..4dead48650 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -255,6 +255,7 @@ bool s390_has_feat(S390Feat feat)
case S390_FEAT_SIE_CMMA:
case S390_FEAT_SIE_PFMFI:
case S390_FEAT_SIE_IBS:
+ case S390_FEAT_CONFIGURATION_TOPOLOGY:
return false;
break;
default:
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index bc5c56a305..0f0e784b2a 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -86,6 +86,7 @@
#define PRIV_B9_EQBS 0x9c
#define PRIV_B9_CLP 0xa0
+#define PRIV_B9_PTF 0xa2
#define PRIV_B9_PCISTG 0xd0
#define PRIV_B9_PCILG 0xd2
#define PRIV_B9_RPCIT 0xd3
@@ -138,7 +139,6 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
-static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
@@ -337,21 +337,28 @@ int kvm_arch_get_default_type(MachineState *ms)
int kvm_arch_init(MachineState *ms, KVMState *s)
{
+ int required_caps[] = {
+ KVM_CAP_DEVICE_CTRL,
+ KVM_CAP_SYNC_REGS,
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(required_caps); i++) {
+ if (!kvm_check_extension(s, required_caps[i])) {
+ error_report("KVM is missing capability #%d - "
+ "please use kernel 3.15 or newer", required_caps[i]);
+ return -1;
+ }
+ }
+
object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
false, NULL);
- if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
- error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
- "please use kernel 3.15 or newer");
- return -1;
- }
if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
error_report("KVM is missing capability KVM_CAP_S390_COW - "
"unsupported environment");
return -1;
}
- cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
@@ -365,6 +372,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
+ kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0);
if (ri_allowed()) {
if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
cap_ri = 1;
@@ -458,37 +466,28 @@ void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
static int can_sync_regs(CPUState *cs, int regs)
{
- return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
+ return (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
+#define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \
+ KVM_SYNC_CRS | KVM_SYNC_PREFIX)
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
- struct kvm_sregs sregs;
- struct kvm_regs regs;
struct kvm_fpu fpu = {};
int r;
int i;
+ g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
+
/* always save the PSW and the GPRS*/
cs->kvm_run->psw_addr = env->psw.addr;
cs->kvm_run->psw_mask = env->psw.mask;
- if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
- for (i = 0; i < 16; i++) {
- cs->kvm_run->s.regs.gprs[i] = env->regs[i];
- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
- }
- } else {
- for (i = 0; i < 16; i++) {
- regs.gprs[i] = env->regs[i];
- }
- r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
- if (r < 0) {
- return r;
- }
- }
+ memcpy(cs->kvm_run->s.regs.gprs, env->regs, sizeof(cs->kvm_run->s.regs.gprs));
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
if (can_sync_regs(cs, KVM_SYNC_VRS)) {
for (i = 0; i < 32; i++) {
@@ -521,6 +520,15 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return 0;
}
+ /*
+ * Access registers, control registers and the prefix - these are
+     * always available via kvm_sync_regs in the kernels that we support.
+ */
+ memcpy(cs->kvm_run->s.regs.acrs, env->aregs, sizeof(cs->kvm_run->s.regs.acrs));
+ memcpy(cs->kvm_run->s.regs.crs, env->cregs, sizeof(cs->kvm_run->s.regs.crs));
+ cs->kvm_run->s.regs.prefix = env->psa;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_PREFIX;
+
if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
cs->kvm_run->s.regs.cputm = env->cputm;
cs->kvm_run->s.regs.ckc = env->ckc;
@@ -567,25 +575,6 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
- /* access registers and control registers*/
- if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
- for (i = 0; i < 16; i++) {
- cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
- cs->kvm_run->s.regs.crs[i] = env->cregs[i];
- }
- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
- } else {
- for (i = 0; i < 16; i++) {
- sregs.acrs[i] = env->aregs[i];
- sregs.crs[i] = env->cregs[i];
- }
- r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
- if (r < 0) {
- return r;
- }
- }
-
if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
@@ -607,13 +596,6 @@ int kvm_arch_put_registers(CPUState *cs, int level)
cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
}
- /* Finally the prefix */
- if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
- cs->kvm_run->s.regs.prefix = env->psa;
- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
- } else {
- /* prefix is only supported via sync regs */
- }
return 0;
}
@@ -621,8 +603,6 @@ int kvm_arch_get_registers(CPUState *cs)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
- struct kvm_sregs sregs;
- struct kvm_regs regs;
struct kvm_fpu fpu;
int i, r;
@@ -630,37 +610,14 @@ int kvm_arch_get_registers(CPUState *cs)
env->psw.addr = cs->kvm_run->psw_addr;
env->psw.mask = cs->kvm_run->psw_mask;
- /* the GPRS */
- if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
- for (i = 0; i < 16; i++) {
- env->regs[i] = cs->kvm_run->s.regs.gprs[i];
- }
- } else {
- r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
- if (r < 0) {
- return r;
- }
- for (i = 0; i < 16; i++) {
- env->regs[i] = regs.gprs[i];
- }
- }
+ /* the GPRS, ACRS and CRS */
+ g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
+ memcpy(env->regs, cs->kvm_run->s.regs.gprs, sizeof(env->regs));
+ memcpy(env->aregs, cs->kvm_run->s.regs.acrs, sizeof(env->aregs));
+ memcpy(env->cregs, cs->kvm_run->s.regs.crs, sizeof(env->cregs));
- /* The ACRS and CRS */
- if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
- for (i = 0; i < 16; i++) {
- env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
- env->cregs[i] = cs->kvm_run->s.regs.crs[i];
- }
- } else {
- r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
- if (r < 0) {
- return r;
- }
- for (i = 0; i < 16; i++) {
- env->aregs[i] = sregs.acrs[i];
- env->cregs[i] = sregs.crs[i];
- }
- }
+ /* The prefix */
+ env->psa = cs->kvm_run->s.regs.prefix;
/* Floating point and vector registers */
if (can_sync_regs(cs, KVM_SYNC_VRS)) {
@@ -685,11 +642,6 @@ int kvm_arch_get_registers(CPUState *cs)
env->fpc = fpu.fpc;
}
- /* The prefix */
- if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
- env->psa = cs->kvm_run->s.regs.prefix;
- }
-
if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
env->cputm = cs->kvm_run->s.regs.cputm;
env->ckc = cs->kvm_run->s.regs.ckc;
@@ -1457,6 +1409,13 @@ static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
}
}
+static void kvm_handle_ptf(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb >> 20) & 0x0f;
+
+ s390_handle_ptf(cpu, r1, RA_IGNORED);
+}
+
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
int r = 0;
@@ -1474,6 +1433,9 @@ static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
case PRIV_B9_RPCIT:
r = kvm_rpcit_service_call(cpu, run);
break;
+ case PRIV_B9_PTF:
+ kvm_handle_ptf(cpu, run);
+ break;
case PRIV_B9_EQBS:
/* just inject exception */
r = -1;
@@ -1911,9 +1873,12 @@ static int handle_stsi(S390CPU *cpu)
if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
return 0;
}
- /* Only sysib 3.2.2 needs post-handling for now. */
insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
return 0;
+ case 15:
+ insert_stsi_15_1_x(cpu, run->s390_stsi.sel2, run->s390_stsi.addr,
+ run->s390_stsi.ar, RA_IGNORED);
+ return 0;
default:
return 0;
}
@@ -2495,6 +2460,14 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
set_bit(S390_FEAT_UNPACK, model->features);
}
+ /*
+     * If we have kernel support for CPU topology, indicate the
+ * configuration-topology facility.
+ */
+ if (kvm_check_extension(kvm_state, KVM_CAP_S390_CPU_TOPOLOGY)) {
+ set_bit(S390_FEAT_CONFIGURATION_TOPOLOGY, model->features);
+ }
+
/* We emulate a zPCI bus and AEN, therefore we don't need HW support */
set_bit(S390_FEAT_ZPCI, model->features);
set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
@@ -2661,6 +2634,23 @@ int kvm_s390_get_zpci_op(void)
return cap_zpci_op;
}
+int kvm_s390_topology_set_mtcr(uint64_t attr)
+{
+ struct kvm_device_attr attribute = {
+ .group = KVM_S390_VM_CPU_TOPOLOGY,
+ .attr = attr,
+ };
+
+ if (!s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY)) {
+ return 0;
+ }
+ if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_TOPOLOGY, attr)) {
+ return -ENOTSUP;
+ }
+
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
+}
+
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
diff --git a/target/s390x/kvm/kvm_s390x.h b/target/s390x/kvm/kvm_s390x.h
index f9785564d0..649dae5948 100644
--- a/target/s390x/kvm/kvm_s390x.h
+++ b/target/s390x/kvm/kvm_s390x.h
@@ -47,5 +47,6 @@ void kvm_s390_crypto_reset(void);
void kvm_s390_restart_interrupt(S390CPU *cpu);
void kvm_s390_stop_interrupt(S390CPU *cpu);
void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info);
+int kvm_s390_topology_set_mtcr(uint64_t attr);
#endif /* KVM_S390X_H */
diff --git a/target/s390x/kvm/meson.build b/target/s390x/kvm/meson.build
index d6aca590ae..588a9aa737 100644
--- a/target/s390x/kvm/meson.build
+++ b/target/s390x/kvm/meson.build
@@ -1,7 +1,8 @@
s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
'pv.c',
- 'kvm.c'
+ 'kvm.c',
+ 'stsi-topology.c'
), if_false: files(
'stubs.c'
))
diff --git a/target/s390x/kvm/stsi-topology.c b/target/s390x/kvm/stsi-topology.c
new file mode 100644
index 0000000000..efd2aa71f1
--- /dev/null
+++ b/target/s390x/kvm/stsi-topology.c
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU S390x CPU Topology
+ *
+ * Copyright IBM Corp. 2022, 2023
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/cpu-topology.h"
+
+QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_LOW != 1);
+QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_MEDIUM != 2);
+QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_HIGH != 3);
+
+/**
+ * fill_container:
+ * @p: The address of the container TLE to fill
+ * @level: The level of nesting for this container
+ * @id: The ID of this container, unique within its parent container
+ *
+ * Returns a pointer to the next free TLE entry.
+ */
+static char *fill_container(char *p, int level, int id)
+{
+ SYSIBContainerListEntry *tle = (SYSIBContainerListEntry *)p;
+
+ tle->nl = level;
+ tle->id = id;
+ return p + sizeof(*tle);
+}
+
+/**
+ * fill_tle_cpu:
+ * @p: The address of the CPU TLE to fill
+ * @entry: a pointer to the S390TopologyEntry describing the group
+ *         of CPUs this TLE stands for.
+ *
+ * Returns a pointer to the next free TLE entry.
+ */
+static char *fill_tle_cpu(char *p, S390TopologyEntry *entry)
+{
+ SysIBCPUListEntry *tle = (SysIBCPUListEntry *)p;
+ S390TopologyId topology_id = entry->id;
+
+ tle->nl = 0;
+ tle->flags = 0;
+ if (topology_id.vertical) {
+ tle->flags |= topology_id.entitlement;
+ }
+ if (topology_id.dedicated) {
+ tle->flags |= SYSIB_TLE_DEDICATED;
+ }
+ tle->type = topology_id.type;
+ tle->origin = cpu_to_be16(topology_id.origin * 64);
+ tle->mask = cpu_to_be64(entry->mask);
+ return p + sizeof(*tle);
+}
+
+/*
+ * Macro to check that @data, after being incremented by @x,
+ * does not exceed the size of the SysIB.
+ */
+#define SYSIB_GUARD(data, x) do { \
+ data += x; \
+ if (data > sizeof(SysIB)) { \
+ return 0; \
+ } \
+ } while (0)
+
+/**
+ * stsi_topology_fill_sysib:
+ * @topology_list: ordered list of groups of CPUs with identical properties
+ * @p: A pointer to the position of the first TLE
+ * @level: The nesting level wanted by the guest
+ *
+ * Fill the SYSIB with the topology information as described in
+ * the PoP, nesting containers as appropriate, with the maximum
+ * nesting limited by @level.
+ *
+ * Return value:
+ * On success: the size of the SysIB_151x after being filled with TLEs.
+ * On error: 0 if the TLEs would overrun the end of the SysIB.
+ */
+static int stsi_topology_fill_sysib(S390TopologyList *topology_list,
+ char *p, int level)
+{
+ S390TopologyEntry *entry;
+ int last_drawer = -1;
+ int last_book = -1;
+ int last_socket = -1;
+ int drawer_id = 0;
+ int book_id = 0;
+ int socket_id = 0;
+ int n = sizeof(SysIB_151x);
+
+ QTAILQ_FOREACH(entry, topology_list, next) {
+ bool drawer_change = last_drawer != entry->id.drawer;
+ bool book_change = drawer_change || last_book != entry->id.book;
+ bool socket_change = book_change || last_socket != entry->id.socket;
+
+ if (level > 3 && drawer_change) {
+ SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
+ p = fill_container(p, 3, drawer_id++);
+ book_id = 0;
+ }
+ if (level > 2 && book_change) {
+ SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
+ p = fill_container(p, 2, book_id++);
+ socket_id = 0;
+ }
+ if (socket_change) {
+ SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
+ p = fill_container(p, 1, socket_id++);
+ }
+
+ SYSIB_GUARD(n, sizeof(SysIBCPUListEntry));
+ p = fill_tle_cpu(p, entry);
+ last_drawer = entry->id.drawer;
+ last_book = entry->id.book;
+ last_socket = entry->id.socket;
+ }
+
+ return n;
+}
+
+/**
+ * setup_stsi:
+ * @topology_list: ordered list of groups of CPUs with identical properties
+ * @sysib: pointer to a SysIB to be filled with SysIB_151x data
+ * @level: Nesting level specified by the guest
+ *
+ * Set up the SYSIB for STSI 15.1.x: the header as well as the description
+ * of the topology.
+ */
+static int setup_stsi(S390TopologyList *topology_list, SysIB_151x *sysib,
+ int level)
+{
+ sysib->mnest = level;
+ switch (level) {
+ case 4:
+ sysib->mag[S390_TOPOLOGY_MAG4] = current_machine->smp.drawers;
+ sysib->mag[S390_TOPOLOGY_MAG3] = current_machine->smp.books;
+ sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.sockets;
+ sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
+ break;
+ case 3:
+ sysib->mag[S390_TOPOLOGY_MAG3] = current_machine->smp.drawers *
+ current_machine->smp.books;
+ sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.sockets;
+ sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
+ break;
+ case 2:
+ sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.drawers *
+ current_machine->smp.books *
+ current_machine->smp.sockets;
+ sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
+ break;
+ }
+
+ return stsi_topology_fill_sysib(topology_list, sysib->tle, level);
+}
+
+/**
+ * s390_topology_add_cpu_to_entry:
+ * @entry: Topology entry to setup
+ * @cpu: the S390CPU to add
+ *
+ * Set the core bit inside the topology mask.
+ */
+static void s390_topology_add_cpu_to_entry(S390TopologyEntry *entry,
+ S390CPU *cpu)
+{
+ set_bit(63 - (cpu->env.core_id % 64), &entry->mask);
+}
+
+/**
+ * s390_topology_from_cpu:
+ * @cpu: the S390CPU from which to derive the topology id
+ *
+ * Initialize the topology id from the CPU environment.
+ */
+static S390TopologyId s390_topology_from_cpu(S390CPU *cpu)
+{
+ S390TopologyId topology_id = {
+ .drawer = cpu->env.drawer_id,
+ .book = cpu->env.book_id,
+ .socket = cpu->env.socket_id,
+ .type = S390_TOPOLOGY_CPU_IFL,
+ .vertical = s390_topology.polarization == S390_CPU_POLARIZATION_VERTICAL,
+ .entitlement = cpu->env.entitlement,
+ .dedicated = cpu->env.dedicated,
+ .origin = cpu->env.core_id / 64,
+ };
+
+ return topology_id;
+}
+
+/**
+ * s390_topology_id_cmp:
+ * @l: first S390TopologyId
+ * @r: second S390TopologyId
+ *
+ * Compare two topology ids according to the sorting order specified by the PoP.
+ *
+ * Returns a negative number if the first id is less than, 0 if it is equal to
+ * and positive if it is larger than the second id.
+ */
+static int s390_topology_id_cmp(const S390TopologyId *l,
+ const S390TopologyId *r)
+{
+ /*
+ * lexical order, compare less significant values only if more significant
+ * ones are equal
+ */
+ return l->sentinel - r->sentinel ?:
+ l->drawer - r->drawer ?:
+ l->book - r->book ?:
+ l->socket - r->socket ?:
+ l->type - r->type ?:
+ /* logic is inverted for the next three */
+ r->vertical - l->vertical ?:
+ r->entitlement - l->entitlement ?:
+ r->dedicated - l->dedicated ?:
+ l->origin - r->origin;
+}
+
+static bool s390_topology_id_eq(const S390TopologyId *l,
+ const S390TopologyId *r)
+{
+ return !s390_topology_id_cmp(l, r);
+}
+
+static bool s390_topology_id_lt(const S390TopologyId *l,
+ const S390TopologyId *r)
+{
+ return s390_topology_id_cmp(l, r) < 0;
+}
+
+/**
+ * s390_topology_fill_list_sorted:
+ * @topology_list: list to fill
+ *
+ * Create S390TopologyEntry structures as appropriate from all CPUs and
+ * fill the topology_list with the entries in the order specified by the PoP.
+ */
+static void s390_topology_fill_list_sorted(S390TopologyList *topology_list)
+{
+ CPUState *cs;
+ S390TopologyEntry sentinel = { .id.sentinel = 1 };
+
+ QTAILQ_INIT(topology_list);
+
+ QTAILQ_INSERT_HEAD(topology_list, &sentinel, next);
+
+ CPU_FOREACH(cs) {
+ S390TopologyId id = s390_topology_from_cpu(S390_CPU(cs));
+ S390TopologyEntry *entry = NULL, *tmp;
+
+ QTAILQ_FOREACH(tmp, topology_list, next) {
+ if (s390_topology_id_eq(&id, &tmp->id)) {
+ entry = tmp;
+ break;
+ } else if (s390_topology_id_lt(&id, &tmp->id)) {
+ entry = g_malloc0(sizeof(*entry));
+ entry->id = id;
+ QTAILQ_INSERT_BEFORE(tmp, entry, next);
+ break;
+ }
+ }
+ assert(entry);
+ s390_topology_add_cpu_to_entry(entry, S390_CPU(cs));
+ }
+
+ QTAILQ_REMOVE(topology_list, &sentinel, next);
+}
+
+/**
+ * s390_topology_empty_list:
+ * @topology_list: the list to empty
+ *
+ * Remove and free all entries in the topology list.
+ */
+static void s390_topology_empty_list(S390TopologyList *topology_list)
+{
+ S390TopologyEntry *entry = NULL;
+ S390TopologyEntry *tmp = NULL;
+
+ QTAILQ_FOREACH_SAFE(entry, topology_list, next, tmp) {
+ QTAILQ_REMOVE(topology_list, entry, next);
+ g_free(entry);
+ }
+}
+
+/**
+ * insert_stsi_15_1_x:
+ * @cpu: the CPU issuing the call, for which the CC is set
+ * @sel2: the selector 2, containing the requested nesting level
+ * @addr: Guest logical address of the guest SysIB
+ * @ar: the access register number
+ * @ra: the return address
+ *
+ * Emulate STSI 15.1.x, that is, perform all necessary checks and
+ * fill the SYSIB.
+ * In case the topology description is too long to fit into the SYSIB,
+ * set CC=3 and abort without writing the SYSIB.
+ */
+void insert_stsi_15_1_x(S390CPU *cpu, int sel2, uint64_t addr, uint8_t ar, uintptr_t ra)
+{
+ S390TopologyList topology_list;
+ SysIB sysib = {0};
+ int length;
+
+ if (!s390_has_topology() || sel2 < 2 || sel2 > SCLP_READ_SCP_INFO_MNEST) {
+ setcc(cpu, 3);
+ return;
+ }
+
+ s390_topology_fill_list_sorted(&topology_list);
+ length = setup_stsi(&topology_list, &sysib.sysib_151x, sel2);
+ s390_topology_empty_list(&topology_list);
+
+ if (!length) {
+ setcc(cpu, 3);
+ return;
+ }
+
+ sysib.sysib_151x.length = cpu_to_be16(length);
+ if (!s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, length)) {
+ setcc(cpu, 0);
+ } else {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ }
+}
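
The mask convention documented in target/s390x/cpu.h (CPU address =
origin * 64 + bit position) is what s390_topology_from_cpu() and
s390_topology_add_cpu_to_entry() above implement between them: the
core_id is split into a 64-CPU group and an MSB-first bit index. A small
self-contained check of that arithmetic (standalone sketch, using
GCC/Clang builtins):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint16_t core_id = 70;
    uint16_t origin = core_id / 64;               /* second 64-CPU group */
    uint64_t mask = 1ULL << (63 - core_id % 64);  /* MSB-first bit order */

    /* CPU address = origin * 64 + bit position within the mask */
    assert(origin * 64 + (63 - __builtin_ctzll(mask)) == core_id);
    return 0;
}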
diff --git a/target/sparc/cpu-feature.h.inc b/target/sparc/cpu-feature.h.inc
new file mode 100644
index 0000000000..d800f18c4e
--- /dev/null
+++ b/target/sparc/cpu-feature.h.inc
@@ -0,0 +1,14 @@
+FEATURE(FLOAT128)
+FEATURE(MUL)
+FEATURE(DIV)
+FEATURE(VIS1)
+FEATURE(VIS2)
+FEATURE(FSMULD)
+FEATURE(HYPV)
+FEATURE(CMT)
+FEATURE(GL)
+FEATURE(TA0_SHUTDOWN) /* Shutdown on "ta 0x0" */
+FEATURE(ASR17)
+FEATURE(CACHE_CTRL)
+FEATURE(POWERDOWN)
+FEATURE(CASA)
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 8ba96ae225..bb1a155510 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -403,9 +403,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_sfsr_mask = 0x00016fff,
.mmu_trcr_mask = 0x0000003f,
.nwindows = 7,
- .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL |
- CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT |
- CPU_FEATURE_FMUL,
+ .features = CPU_FEATURE_MUL | CPU_FEATURE_DIV,
},
{
.name = "TI MicroSparc II",
@@ -545,21 +543,20 @@ static const sparc_def_t sparc_defs[] = {
#endif
};
+/* This must match sparc_cpu_properties[]. */
static const char * const feature_name[] = {
- "float",
- "float128",
- "swap",
- "mul",
- "div",
- "flush",
- "fsqrt",
- "fmul",
- "vis1",
- "vis2",
- "fsmuld",
- "hypv",
- "cmt",
- "gl",
+ [CPU_FEATURE_BIT_FLOAT128] = "float128",
+#ifdef TARGET_SPARC64
+ [CPU_FEATURE_BIT_CMT] = "cmt",
+ [CPU_FEATURE_BIT_GL] = "gl",
+ [CPU_FEATURE_BIT_HYPV] = "hypv",
+ [CPU_FEATURE_BIT_VIS1] = "vis1",
+ [CPU_FEATURE_BIT_VIS2] = "vis2",
+#else
+ [CPU_FEATURE_BIT_MUL] = "mul",
+ [CPU_FEATURE_BIT_DIV] = "div",
+ [CPU_FEATURE_BIT_FSMULD] = "fsmuld",
+#endif
};
static void print_features(uint32_t features, const char *prefix)
@@ -757,9 +754,8 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
CPUSPARCState *env = &cpu->env;
#if defined(CONFIG_USER_ONLY)
- if ((env->def.features & CPU_FEATURE_FLOAT)) {
- env->def.features |= CPU_FEATURE_FLOAT128;
- }
+ /* We are emulating the kernel, which will trap and emulate float128. */
+ env->def.features |= CPU_FEATURE_FLOAT128;
#endif
env->version = env->def.iu_version;
@@ -835,21 +831,29 @@ static PropertyInfo qdev_prop_nwindows = {
.set = sparc_set_nwindows,
};
+/* This must match feature_name[]. */
static Property sparc_cpu_properties[] = {
- DEFINE_PROP_BIT("float", SPARCCPU, env.def.features, 0, false),
- DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features, 1, false),
- DEFINE_PROP_BIT("swap", SPARCCPU, env.def.features, 2, false),
- DEFINE_PROP_BIT("mul", SPARCCPU, env.def.features, 3, false),
- DEFINE_PROP_BIT("div", SPARCCPU, env.def.features, 4, false),
- DEFINE_PROP_BIT("flush", SPARCCPU, env.def.features, 5, false),
- DEFINE_PROP_BIT("fsqrt", SPARCCPU, env.def.features, 6, false),
- DEFINE_PROP_BIT("fmul", SPARCCPU, env.def.features, 7, false),
- DEFINE_PROP_BIT("vis1", SPARCCPU, env.def.features, 8, false),
- DEFINE_PROP_BIT("vis2", SPARCCPU, env.def.features, 9, false),
- DEFINE_PROP_BIT("fsmuld", SPARCCPU, env.def.features, 10, false),
- DEFINE_PROP_BIT("hypv", SPARCCPU, env.def.features, 11, false),
- DEFINE_PROP_BIT("cmt", SPARCCPU, env.def.features, 12, false),
- DEFINE_PROP_BIT("gl", SPARCCPU, env.def.features, 13, false),
+ DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_FLOAT128, false),
+#ifdef TARGET_SPARC64
+ DEFINE_PROP_BIT("cmt", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_CMT, false),
+ DEFINE_PROP_BIT("gl", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_GL, false),
+ DEFINE_PROP_BIT("hypv", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_HYPV, false),
+ DEFINE_PROP_BIT("vis1", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_VIS1, false),
+ DEFINE_PROP_BIT("vis2", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_VIS2, false),
+#else
+ DEFINE_PROP_BIT("mul", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_MUL, false),
+ DEFINE_PROP_BIT("div", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_DIV, false),
+ DEFINE_PROP_BIT("fsmuld", SPARCCPU, env.def.features,
+ CPU_FEATURE_BIT_FSMULD, false),
+#endif
DEFINE_PROP_UNSIGNED("iu-version", SPARCCPU, env.def.iu_version, 0,
qdev_prop_uint64, target_ulong),
DEFINE_PROP_UINT32("fpu-version", SPARCCPU, env.def.fpu_version, 0),
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index b3a98f1d74..758a4e8aaa 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -6,6 +6,29 @@
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
+/*
+ * From Oracle SPARC Architecture 2015:
+ *
+ * Compatibility notes: The PSO memory model described in SPARC V8 and
+ * SPARC V9 compatibility architecture specifications was never implemented
+ * in a SPARC V9 implementation and is not included in the Oracle SPARC
+ * Architecture specification.
+ *
+ * The RMO memory model described in the SPARC V9 specification was
+ * implemented in some non-Sun SPARC V9 implementations, but is not
+ * directly supported in Oracle SPARC Architecture 2015 implementations.
+ *
+ * Therefore always use TSO in QEMU.
+ *
+ * D.5 Specification of Partial Store Order (PSO)
+ * ... [loads] are followed by an implied MEMBAR #LoadLoad | #LoadStore.
+ *
+ * D.6 Specification of Total Store Order (TSO)
+ * ... PSO with the additional requirement that all [stores] are followed
+ * by an implied MEMBAR #StoreStore.
+ */
+#define TCG_GUEST_DEFAULT_MO (TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST)
+
#if !defined(TARGET_SPARC64)
#define TARGET_DPREGS 16
#else
@@ -268,38 +291,27 @@ struct sparc_def_t {
uint32_t maxtl;
};
-#define CPU_FEATURE_FLOAT (1 << 0)
-#define CPU_FEATURE_FLOAT128 (1 << 1)
-#define CPU_FEATURE_SWAP (1 << 2)
-#define CPU_FEATURE_MUL (1 << 3)
-#define CPU_FEATURE_DIV (1 << 4)
-#define CPU_FEATURE_FLUSH (1 << 5)
-#define CPU_FEATURE_FSQRT (1 << 6)
-#define CPU_FEATURE_FMUL (1 << 7)
-#define CPU_FEATURE_VIS1 (1 << 8)
-#define CPU_FEATURE_VIS2 (1 << 9)
-#define CPU_FEATURE_FSMULD (1 << 10)
-#define CPU_FEATURE_HYPV (1 << 11)
-#define CPU_FEATURE_CMT (1 << 12)
-#define CPU_FEATURE_GL (1 << 13)
-#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */
-#define CPU_FEATURE_ASR17 (1 << 15)
-#define CPU_FEATURE_CACHE_CTRL (1 << 16)
-#define CPU_FEATURE_POWERDOWN (1 << 17)
-#define CPU_FEATURE_CASA (1 << 18)
+#define FEATURE(X) CPU_FEATURE_BIT_##X,
+enum {
+#include "cpu-feature.h.inc"
+};
+
+#undef FEATURE
+#define FEATURE(X) CPU_FEATURE_##X = 1u << CPU_FEATURE_BIT_##X,
+
+enum {
+#include "cpu-feature.h.inc"
+};
+
+#undef FEATURE
#ifndef TARGET_SPARC64
-#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
- CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
- CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
- CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD)
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FSMULD)
#else
-#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
- CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
- CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
- CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \
- CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \
- CPU_FEATURE_CASA)
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FSMULD | CPU_FEATURE_CASA | \
+ CPU_FEATURE_VIS1 | CPU_FEATURE_VIS2)
enum {
mmu_us_12, // Ultrasparc < III (64 entry TLB)
mmu_us_3, // Ultrasparc III (512 entry TLB)
@@ -782,14 +794,12 @@ static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
if (env->pstate & PS_AM) {
flags |= TB_FLAG_AM_ENABLED;
}
- if ((env->def.features & CPU_FEATURE_FLOAT)
- && (env->pstate & PS_PEF)
- && (env->fprs & FPRS_FEF)) {
+ if ((env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) {
flags |= TB_FLAG_FPU_ENABLED;
}
flags |= env->asi << TB_FLAG_ASI_SHIFT;
#else
- if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) {
+ if (env->psref) {
flags |= TB_FLAG_FPU_ENABLED;
}
#endif
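
The sparc feature rework generates both the bit-index enum and the mask
enum from the single list in cpu-feature.h.inc, so the two can never
drift apart; the real code #includes that file twice with different
FEATURE() definitions. A condensed, self-contained illustration of the
same X-macro pattern (the FEATURE_LIST macro stands in for the include):

#define FEATURE_LIST(F) F(VIS1) F(VIS2) F(CASA)

#define FEATURE(X) CPU_FEATURE_BIT_##X,
enum { FEATURE_LIST(FEATURE) };     /* 0, 1, 2, ... */
#undef FEATURE

#define FEATURE(X) CPU_FEATURE_##X = 1u << CPU_FEATURE_BIT_##X,
enum { FEATURE_LIST(FEATURE) };     /* 1 << 0, 1 << 1, ... */
#undef FEATURE
/* e.g. CPU_FEATURE_BIT_VIS2 == 1 and CPU_FEATURE_VIS2 == 1u << 1 */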
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
index f54fa9b959..0f8aa3abcd 100644
--- a/target/sparc/fop_helper.c
+++ b/target/sparc/fop_helper.c
@@ -382,20 +382,7 @@ static void set_fsr(CPUSPARCState *env, target_ulong fsr)
set_float_rounding_mode(rnd_mode, &env->fp_status);
}
-target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr,
- uint32_t new_fsr)
+void helper_set_fsr(CPUSPARCState *env, target_ulong fsr)
{
- old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK);
- set_fsr(env, old_fsr);
- return old_fsr;
+ set_fsr(env, fsr);
}
-
-#ifdef TARGET_SPARC64
-target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr,
- uint64_t new_fsr)
-{
- old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK);
- set_fsr(env, old_fsr);
- return old_fsr;
-}
-#endif
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
index c4358bba84..2bcdc81d54 100644
--- a/target/sparc/helper.c
+++ b/target/sparc/helper.c
@@ -102,9 +102,7 @@ static target_ulong do_udiv(CPUSPARCState *env, target_ulong a,
}
if (cc) {
- env->cc_dst = x0;
env->cc_src2 = overflow;
- env->cc_op = CC_OP_DIV;
}
return x0;
}
@@ -143,9 +141,7 @@ static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a,
}
if (cc) {
- env->cc_dst = x0;
env->cc_src2 = overflow;
- env->cc_op = CC_OP_DIV;
}
return x0;
}
@@ -202,10 +198,8 @@ target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
}
/* Only modify the CC after any exceptions have been generated. */
- env->cc_op = CC_OP_TADDTV;
env->cc_src = src1;
env->cc_src2 = src2;
- env->cc_dst = dst;
return dst;
tag_overflow:
@@ -230,10 +224,8 @@ target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
}
/* Only modify the CC after any exceptions have been generated. */
- env->cc_op = CC_OP_TSUBTV;
env->cc_src = src1;
env->cc_src2 = src2;
- env->cc_dst = dst;
return dst;
tag_overflow:
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
index b8f1e78c75..dd1721a340 100644
--- a/target/sparc/helper.h
+++ b/target/sparc/helper.h
@@ -24,7 +24,6 @@ DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64)
DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int)
DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64)
#endif
-DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32)
DEF_HELPER_1(debug, void, env)
DEF_HELPER_1(save, void, env)
DEF_HELPER_1(restore, void, env)
@@ -43,7 +42,7 @@ DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32)
DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32)
#endif
DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env)
-DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32)
+DEF_HELPER_FLAGS_2(set_fsr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32)
DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32)
DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64)
@@ -55,7 +54,6 @@ DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env)
DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env)
#ifdef TARGET_SPARC64
-DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64)
DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64)
DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
@@ -139,18 +137,6 @@ DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64)
DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64)
DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
-#define VIS_HELPER(name) \
- DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \
- i64, i64, i64) \
- DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \
- i32, i32, i32) \
- DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \
- i64, i64, i64) \
- DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
- i32, i32, i32)
-
-VIS_HELPER(padd)
-VIS_HELPER(psub)
#define VIS_CMPHELPER(name) \
DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \
i64, i64, i64) \
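
The fpadd/fpsub helpers can disappear because each 64-bit VIS register is just four 16-bit or two 32-bit lanes, which TCG can add inline; this is what the new tcg/tcg-op-gvec.h include in translate.c below enables. A sketch of the inline form for one of them:

    /* FPADD16: four independent 16-bit adds within one i64. */
    static void gen_op_fpadd16(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
    {
        tcg_gen_vec_add16_i64(dst, src1, src2);
    }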
diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode
new file mode 100644
index 0000000000..0552f1447d
--- /dev/null
+++ b/target/sparc/insns.decode
@@ -0,0 +1,547 @@
+# SPDX-License-Identifier: LGPL-2.0+
+#
+# Sparc instruction decode definitions.
+# Copyright (c) 2023 Richard Henderson <rth@twiddle.net>
+
+##
+## Major Opcodes 00 and 01 -- branches, call, and sethi.
+##
+
+&bcc i a cond cc
+BPcc 00 a:1 cond:4 001 cc:1 0 - i:s19 &bcc
+Bicc 00 a:1 cond:4 010 i:s22 &bcc cc=0
+FBPfcc 00 a:1 cond:4 101 cc:2 - i:s19 &bcc
+FBfcc 00 a:1 cond:4 110 i:s22 &bcc cc=0
+
+%d16 20:s2 0:14
+BPr 00 a:1 0 cond:3 011 .. - rs1:5 .............. i=%d16
+
+NCP 00 - ---- 111 ---------------------- # CBcc
+
+SETHI 00 rd:5 100 i:22
+
+CALL 01 i:s30
+
+##
+## Major Opcode 10 -- integer, floating-point, vis, and system insns.
+##
+
+&r_r_ri rd rs1 rs2_or_imm imm:bool
+@n_r_ri .. ..... ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri rd=0
+@r_r_ri .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri
+
+&r_r_ri_cc rd rs1 rs2_or_imm imm:bool cc:bool
+@r_r_ri_cc .. rd:5 . cc:1 .... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_cc
+@r_r_ri_cc0 .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_cc cc=0
+@r_r_ri_cc1 .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_cc cc=1
+
+&r_r_r rd rs1 rs2
+@r_r_r .. rd:5 ...... rs1:5 . ........ rs2:5 &r_r_r
+@r_r_r_swap .. rd:5 ...... rs2:5 . ........ rs1:5 &r_r_r
+
+&r_r rd rs
+@r_r1 .. rd:5 ...... rs:5 . ........ ..... &r_r
+@r_r2 .. rd:5 ...... ..... . ........ rs:5 &r_r
+
+{
+ [
+ STBAR 10 00000 101000 01111 0 0000000000000
+ MEMBAR 10 00000 101000 01111 1 000000 cmask:3 mmask:4
+
+ RDCCR 10 rd:5 101000 00010 0 0000000000000
+ RDASI 10 rd:5 101000 00011 0 0000000000000
+ RDTICK 10 rd:5 101000 00100 0 0000000000000
+ RDPC 10 rd:5 101000 00101 0 0000000000000
+ RDFPRS 10 rd:5 101000 00110 0 0000000000000
+ RDASR17 10 rd:5 101000 10001 0 0000000000000
+ RDGSR 10 rd:5 101000 10011 0 0000000000000
+ RDSOFTINT 10 rd:5 101000 10110 0 0000000000000
+ RDTICK_CMPR 10 rd:5 101000 10111 0 0000000000000
+ RDSTICK 10 rd:5 101000 11000 0 0000000000000
+ RDSTICK_CMPR 10 rd:5 101000 11001 0 0000000000000
+ RDSTRAND_STATUS 10 rd:5 101000 11010 0 0000000000000
+ ]
+ # Before v8, any rs1 value was accepted; from v8 on, RDY requires rs1 == 0.
+ RDY 10 rd:5 101000 rs1:5 0 0000000000000
+}
+
+{
+ [
+ WRY 10 00000 110000 ..... . ............. @n_r_ri
+ WRCCR 10 00010 110000 ..... . ............. @n_r_ri
+ WRASI 10 00011 110000 ..... . ............. @n_r_ri
+ WRFPRS 10 00110 110000 ..... . ............. @n_r_ri
+ {
+ WRGSR 10 10011 110000 ..... . ............. @n_r_ri
+ WRPOWERDOWN 10 10011 110000 ..... . ............. @n_r_ri
+ }
+ WRSOFTINT_SET 10 10100 110000 ..... . ............. @n_r_ri
+ WRSOFTINT_CLR 10 10101 110000 ..... . ............. @n_r_ri
+ WRSOFTINT 10 10110 110000 ..... . ............. @n_r_ri
+ WRTICK_CMPR 10 10111 110000 ..... . ............. @n_r_ri
+ WRSTICK 10 11000 110000 ..... . ............. @n_r_ri
+ WRSTICK_CMPR 10 11001 110000 ..... . ............. @n_r_ri
+ ]
+ # Before v8, rd == 0 was WRY, and other rd values executed as a nop.
+ [
+ NOP_v7 10 ----- 110000 ----- 0 00000000 -----
+ NOP_v7 10 ----- 110000 ----- 1 -------- -----
+ ]
+}
+
+{
+ RDPSR 10 rd:5 101001 00000 0 0000000000000
+ RDHPR_hpstate 10 rd:5 101001 00000 0 0000000000000
+}
+RDHPR_htstate 10 rd:5 101001 00001 0 0000000000000
+RDHPR_hintp 10 rd:5 101001 00011 0 0000000000000
+RDHPR_htba 10 rd:5 101001 00101 0 0000000000000
+RDHPR_hver 10 rd:5 101001 00110 0 0000000000000
+RDHPR_hstick_cmpr 10 rd:5 101001 11111 0 0000000000000
+
+{
+ WRPSR 10 00000 110001 ..... . ............. @n_r_ri
+ SAVED 10 00000 110001 00000 0 0000000000000
+}
+RESTORED 10 00001 110001 00000 0 0000000000000
+# UA2005 ALLCLEAN
+# UA2005 OTHERW
+# UA2005 NORMALW
+# UA2005 INVALW
+
+{
+ RDWIM 10 rd:5 101010 00000 0 0000000000000
+ RDPR_tpc 10 rd:5 101010 00000 0 0000000000000
+}
+RDPR_tnpc 10 rd:5 101010 00001 0 0000000000000
+RDPR_tstate 10 rd:5 101010 00010 0 0000000000000
+RDPR_tt 10 rd:5 101010 00011 0 0000000000000
+RDPR_tick 10 rd:5 101010 00100 0 0000000000000
+RDPR_tba 10 rd:5 101010 00101 0 0000000000000
+RDPR_pstate 10 rd:5 101010 00110 0 0000000000000
+RDPR_tl 10 rd:5 101010 00111 0 0000000000000
+RDPR_pil 10 rd:5 101010 01000 0 0000000000000
+RDPR_cwp 10 rd:5 101010 01001 0 0000000000000
+RDPR_cansave 10 rd:5 101010 01010 0 0000000000000
+RDPR_canrestore 10 rd:5 101010 01011 0 0000000000000
+RDPR_cleanwin 10 rd:5 101010 01100 0 0000000000000
+RDPR_otherwin 10 rd:5 101010 01101 0 0000000000000
+RDPR_wstate 10 rd:5 101010 01110 0 0000000000000
+RDPR_gl 10 rd:5 101010 10000 0 0000000000000
+RDPR_strand_status 10 rd:5 101010 11010 0 0000000000000
+RDPR_ver 10 rd:5 101010 11111 0 0000000000000
+
+{
+ WRWIM 10 00000 110010 ..... . ............. @n_r_ri
+ WRPR_tpc 10 00000 110010 ..... . ............. @n_r_ri
+}
+WRPR_tnpc 10 00001 110010 ..... . ............. @n_r_ri
+WRPR_tstate 10 00010 110010 ..... . ............. @n_r_ri
+WRPR_tt 10 00011 110010 ..... . ............. @n_r_ri
+WRPR_tick 10 00100 110010 ..... . ............. @n_r_ri
+WRPR_tba 10 00101 110010 ..... . ............. @n_r_ri
+WRPR_pstate 10 00110 110010 ..... . ............. @n_r_ri
+WRPR_tl 10 00111 110010 ..... . ............. @n_r_ri
+WRPR_pil 10 01000 110010 ..... . ............. @n_r_ri
+WRPR_cwp 10 01001 110010 ..... . ............. @n_r_ri
+WRPR_cansave 10 01010 110010 ..... . ............. @n_r_ri
+WRPR_canrestore 10 01011 110010 ..... . ............. @n_r_ri
+WRPR_cleanwin 10 01100 110010 ..... . ............. @n_r_ri
+WRPR_otherwin 10 01101 110010 ..... . ............. @n_r_ri
+WRPR_wstate 10 01110 110010 ..... . ............. @n_r_ri
+WRPR_gl 10 10000 110010 ..... . ............. @n_r_ri
+WRPR_strand_status 10 11010 110010 ..... . ............. @n_r_ri
+
+{
+ FLUSHW 10 00000 101011 00000 0 0000000000000
+ RDTBR 10 rd:5 101011 00000 0 0000000000000
+}
+
+{
+ WRTBR 10 00000 110011 ..... . ............. @n_r_ri
+ WRHPR_hpstate 10 00000 110011 ..... . ............. @n_r_ri
+}
+WRHPR_htstate 10 00001 110011 ..... . ............. @n_r_ri
+WRHPR_hintp 10 00011 110011 ..... . ............. @n_r_ri
+WRHPR_htba 10 00101 110011 ..... . ............. @n_r_ri
+WRHPR_hstick_cmpr 10 11111 110011 ..... . ............. @n_r_ri
+
+ADD 10 ..... 0.0000 ..... . ............. @r_r_ri_cc
+AND 10 ..... 0.0001 ..... . ............. @r_r_ri_cc
+OR 10 ..... 0.0010 ..... . ............. @r_r_ri_cc
+XOR 10 ..... 0.0011 ..... . ............. @r_r_ri_cc
+SUB 10 ..... 0.0100 ..... . ............. @r_r_ri_cc
+ANDN 10 ..... 0.0101 ..... . ............. @r_r_ri_cc
+ORN 10 ..... 0.0110 ..... . ............. @r_r_ri_cc
+XORN 10 ..... 0.0111 ..... . ............. @r_r_ri_cc
+ADDC 10 ..... 0.1000 ..... . ............. @r_r_ri_cc
+SUBC 10 ..... 0.1100 ..... . ............. @r_r_ri_cc
+
+MULX 10 ..... 001001 ..... . ............. @r_r_ri_cc0
+UMUL 10 ..... 0.1010 ..... . ............. @r_r_ri_cc
+SMUL 10 ..... 0.1011 ..... . ............. @r_r_ri_cc
+MULScc 10 ..... 100100 ..... . ............. @r_r_ri_cc1
+
+UDIVX 10 ..... 001101 ..... . ............. @r_r_ri_cc0
+SDIVX 10 ..... 101101 ..... . ............. @r_r_ri_cc0
+UDIV 10 ..... 0.1110 ..... . ............. @r_r_ri_cc
+SDIV 10 ..... 0.1111 ..... . ............. @r_r_ri_cc
+
+TADDcc 10 ..... 100000 ..... . ............. @r_r_ri_cc1
+TSUBcc 10 ..... 100001 ..... . ............. @r_r_ri_cc1
+TADDccTV 10 ..... 100010 ..... . ............. @r_r_ri_cc1
+TSUBccTV 10 ..... 100011 ..... . ............. @r_r_ri_cc1
+
+POPC 10 rd:5 101110 00000 imm:1 rs2_or_imm:s13 \
+ &r_r_ri_cc rs1=0 cc=0
+
+&shiftr rd rs1 rs2 x:bool
+@shiftr .. rd:5 ...... rs1:5 . x:1 ....... rs2:5 &shiftr
+
+SLL_r 10 ..... 100101 ..... 0 . 0000000 ..... @shiftr
+SRL_r 10 ..... 100110 ..... 0 . 0000000 ..... @shiftr
+SRA_r 10 ..... 100111 ..... 0 . 0000000 ..... @shiftr
+
+&shifti rd rs1 i x:bool
+@shifti .. rd:5 ...... rs1:5 . x:1 ...... i:6 &shifti
+
+SLL_i 10 ..... 100101 ..... 1 . 000000 ...... @shifti
+SRL_i 10 ..... 100110 ..... 1 . 000000 ...... @shifti
+SRA_i 10 ..... 100111 ..... 1 . 000000 ...... @shifti
+
+Tcc_r 10 0 cond:4 111010 rs1:5 0 cc:1 0000000 rs2:5
+{
+ # For v7, the entire simm13 field is present, but masked to 7 bits.
+ # For v8, [12:7] are reserved. However, a compatibility note for
+ # the Tcc insn in the v9 manual suggests that the v8 reserved field
+ # was ignored and did not produce traps.
+ Tcc_i_v7 10 0 cond:4 111010 rs1:5 1 ------ i:7
+
+ # For v9, bits [12:11] are cc1 and cc0 (and cc0 must be 0).
+ # Bits [10:8] are reserved and the OSA2011 manual says they must be 0.
+ Tcc_i_v9 10 0 cond:4 111010 rs1:5 1 cc:1 0 000 i:8
+}
+
+MOVcc 10 rd:5 101100 1 cond:4 imm:1 cc:1 0 rs2_or_imm:s11
+MOVfcc 10 rd:5 101100 0 cond:4 imm:1 cc:2 rs2_or_imm:s11
+MOVR 10 rd:5 101111 rs1:5 imm:1 cond:3 rs2_or_imm:s10
+
+JMPL 10 ..... 111000 ..... . ............. @r_r_ri
+{
+ RETT 10 00000 111001 ..... . ............. @n_r_ri
+ RETURN 10 00000 111001 ..... . ............. @n_r_ri
+}
+NOP 10 00000 111011 ----- 0 00000000----- # FLUSH reg+reg
+NOP 10 00000 111011 ----- 1 ------------- # FLUSH reg+imm
+SAVE 10 ..... 111100 ..... . ............. @r_r_ri
+RESTORE 10 ..... 111101 ..... . ............. @r_r_ri
+
+DONE 10 00000 111110 00000 0 0000000000000
+RETRY 10 00001 111110 00000 0 0000000000000
+
+FMOVs 10 ..... 110100 00000 0 0000 0001 ..... @r_r2
+FMOVd 10 ..... 110100 00000 0 0000 0010 ..... @r_r2
+FMOVq 10 ..... 110100 00000 0 0000 0011 ..... @r_r2
+FNEGs 10 ..... 110100 00000 0 0000 0101 ..... @r_r2
+FNEGd 10 ..... 110100 00000 0 0000 0110 ..... @r_r2
+FNEGq 10 ..... 110100 00000 0 0000 0111 ..... @r_r2
+FABSs 10 ..... 110100 00000 0 0000 1001 ..... @r_r2
+FABSd 10 ..... 110100 00000 0 0000 1010 ..... @r_r2
+FABSq 10 ..... 110100 00000 0 0000 1011 ..... @r_r2
+FSQRTs 10 ..... 110100 00000 0 0010 1001 ..... @r_r2
+FSQRTd 10 ..... 110100 00000 0 0010 1010 ..... @r_r2
+FSQRTq 10 ..... 110100 00000 0 0010 1011 ..... @r_r2
+FADDs 10 ..... 110100 ..... 0 0100 0001 ..... @r_r_r
+FADDd 10 ..... 110100 ..... 0 0100 0010 ..... @r_r_r
+FADDq 10 ..... 110100 ..... 0 0100 0011 ..... @r_r_r
+FSUBs 10 ..... 110100 ..... 0 0100 0101 ..... @r_r_r
+FSUBd 10 ..... 110100 ..... 0 0100 0110 ..... @r_r_r
+FSUBq 10 ..... 110100 ..... 0 0100 0111 ..... @r_r_r
+FMULs 10 ..... 110100 ..... 0 0100 1001 ..... @r_r_r
+FMULd 10 ..... 110100 ..... 0 0100 1010 ..... @r_r_r
+FMULq 10 ..... 110100 ..... 0 0100 1011 ..... @r_r_r
+FDIVs 10 ..... 110100 ..... 0 0100 1101 ..... @r_r_r
+FDIVd 10 ..... 110100 ..... 0 0100 1110 ..... @r_r_r
+FDIVq 10 ..... 110100 ..... 0 0100 1111 ..... @r_r_r
+FsMULd 10 ..... 110100 ..... 0 0110 1001 ..... @r_r_r
+FdMULq 10 ..... 110100 ..... 0 0110 1110 ..... @r_r_r
+FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @r_r2
+FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @r_r2
+FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @r_r2
+FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_r2
+FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @r_r2
+FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @r_r2
+FiTOs 10 ..... 110100 00000 0 1100 0100 ..... @r_r2
+FdTOs 10 ..... 110100 00000 0 1100 0110 ..... @r_r2
+FqTOs 10 ..... 110100 00000 0 1100 0111 ..... @r_r2
+FiTOd 10 ..... 110100 00000 0 1100 1000 ..... @r_r2
+FsTOd 10 ..... 110100 00000 0 1100 1001 ..... @r_r2
+FqTOd 10 ..... 110100 00000 0 1100 1011 ..... @r_r2
+FiTOq 10 ..... 110100 00000 0 1100 1100 ..... @r_r2
+FsTOq 10 ..... 110100 00000 0 1100 1101 ..... @r_r2
+FdTOq 10 ..... 110100 00000 0 1100 1110 ..... @r_r2
+FsTOi 10 ..... 110100 00000 0 1101 0001 ..... @r_r2
+FdTOi 10 ..... 110100 00000 0 1101 0010 ..... @r_r2
+FqTOi 10 ..... 110100 00000 0 1101 0011 ..... @r_r2
+
+FMOVscc 10 rd:5 110101 0 cond:4 1 cc:1 0 000001 rs2:5
+FMOVdcc 10 rd:5 110101 0 cond:4 1 cc:1 0 000010 rs2:5
+FMOVqcc 10 rd:5 110101 0 cond:4 1 cc:1 0 000011 rs2:5
+
+FMOVsfcc 10 rd:5 110101 0 cond:4 0 cc:2 000001 rs2:5
+FMOVdfcc 10 rd:5 110101 0 cond:4 0 cc:2 000010 rs2:5
+FMOVqfcc 10 rd:5 110101 0 cond:4 0 cc:2 000011 rs2:5
+
+FMOVRs 10 rd:5 110101 rs1:5 0 cond:3 00101 rs2:5
+FMOVRd 10 rd:5 110101 rs1:5 0 cond:3 00110 rs2:5
+FMOVRq 10 rd:5 110101 rs1:5 0 cond:3 00111 rs2:5
+
+FCMPs 10 000 cc:2 110101 rs1:5 0 0101 0001 rs2:5
+FCMPd 10 000 cc:2 110101 rs1:5 0 0101 0010 rs2:5
+FCMPq 10 000 cc:2 110101 rs1:5 0 0101 0011 rs2:5
+FCMPEs 10 000 cc:2 110101 rs1:5 0 0101 0101 rs2:5
+FCMPEd 10 000 cc:2 110101 rs1:5 0 0101 0110 rs2:5
+FCMPEq 10 000 cc:2 110101 rs1:5 0 0101 0111 rs2:5
+
+{
+ [
+ EDGE8cc 10 ..... 110110 ..... 0 0000 0000 ..... @r_r_r
+ EDGE8N 10 ..... 110110 ..... 0 0000 0001 ..... @r_r_r
+ EDGE8Lcc 10 ..... 110110 ..... 0 0000 0010 ..... @r_r_r
+ EDGE8LN 10 ..... 110110 ..... 0 0000 0011 ..... @r_r_r
+ EDGE16cc 10 ..... 110110 ..... 0 0000 0100 ..... @r_r_r
+ EDGE16N 10 ..... 110110 ..... 0 0000 0101 ..... @r_r_r
+ EDGE16Lcc 10 ..... 110110 ..... 0 0000 0110 ..... @r_r_r
+ EDGE16LN 10 ..... 110110 ..... 0 0000 0111 ..... @r_r_r
+ EDGE32cc 10 ..... 110110 ..... 0 0000 1000 ..... @r_r_r
+ EDGE32N 10 ..... 110110 ..... 0 0000 1001 ..... @r_r_r
+ EDGE32Lcc 10 ..... 110110 ..... 0 0000 1010 ..... @r_r_r
+ EDGE32LN 10 ..... 110110 ..... 0 0000 1011 ..... @r_r_r
+
+ ARRAY8 10 ..... 110110 ..... 0 0001 0000 ..... @r_r_r
+ ARRAY16 10 ..... 110110 ..... 0 0001 0010 ..... @r_r_r
+ ARRAY32 10 ..... 110110 ..... 0 0001 0100 ..... @r_r_r
+
+ ALIGNADDR 10 ..... 110110 ..... 0 0001 1000 ..... @r_r_r
+ ALIGNADDRL 10 ..... 110110 ..... 0 0001 1010 ..... @r_r_r
+
+ BMASK 10 ..... 110110 ..... 0 0001 1001 ..... @r_r_r
+
+ FPCMPLE16 10 ..... 110110 ..... 0 0010 0000 ..... @r_r_r
+ FPCMPNE16 10 ..... 110110 ..... 0 0010 0010 ..... @r_r_r
+ FPCMPGT16 10 ..... 110110 ..... 0 0010 1000 ..... @r_r_r
+ FPCMPEQ16 10 ..... 110110 ..... 0 0010 1010 ..... @r_r_r
+ FPCMPLE32 10 ..... 110110 ..... 0 0010 0100 ..... @r_r_r
+ FPCMPNE32 10 ..... 110110 ..... 0 0010 0110 ..... @r_r_r
+ FPCMPGT32 10 ..... 110110 ..... 0 0010 1100 ..... @r_r_r
+ FPCMPEQ32 10 ..... 110110 ..... 0 0010 1110 ..... @r_r_r
+
+ FMUL8x16 10 ..... 110110 ..... 0 0011 0001 ..... @r_r_r
+ FMUL8x16AU 10 ..... 110110 ..... 0 0011 0011 ..... @r_r_r
+ FMUL8x16AL 10 ..... 110110 ..... 0 0011 0101 ..... @r_r_r
+ FMUL8SUx16 10 ..... 110110 ..... 0 0011 0110 ..... @r_r_r
+ FMUL8ULx16 10 ..... 110110 ..... 0 0011 0111 ..... @r_r_r
+ FMULD8SUx16 10 ..... 110110 ..... 0 0011 1000 ..... @r_r_r
+ FMULD8ULx16 10 ..... 110110 ..... 0 0011 1001 ..... @r_r_r
+ FPACK32 10 ..... 110110 ..... 0 0011 1010 ..... @r_r_r
+ FPACK16 10 ..... 110110 00000 0 0011 1011 ..... @r_r2
+ FPACKFIX 10 ..... 110110 00000 0 0011 1101 ..... @r_r2
+ PDIST 10 ..... 110110 ..... 0 0011 1110 ..... @r_r_r
+
+ FALIGNDATAg 10 ..... 110110 ..... 0 0100 1000 ..... @r_r_r
+ FPMERGE 10 ..... 110110 ..... 0 0100 1011 ..... @r_r_r
+ BSHUFFLE 10 ..... 110110 ..... 0 0100 1100 ..... @r_r_r
+ FEXPAND 10 ..... 110110 ..... 0 0100 1101 ..... @r_r_r
+
+ FSRCd 10 ..... 110110 ..... 0 0111 0100 00000 @r_r1 # FSRC1d
+ FSRCs 10 ..... 110110 ..... 0 0111 0101 00000 @r_r1 # FSRC1s
+ FSRCd 10 ..... 110110 00000 0 0111 1000 ..... @r_r2 # FSRC2d
+ FSRCs 10 ..... 110110 00000 0 0111 1001 ..... @r_r2 # FSRC2s
+ FNOTd 10 ..... 110110 ..... 0 0110 1010 00000 @r_r1 # FNOT1d
+ FNOTs 10 ..... 110110 ..... 0 0110 1011 00000 @r_r1 # FNOT1s
+ FNOTd 10 ..... 110110 00000 0 0110 0110 ..... @r_r2 # FNOT2d
+ FNOTs 10 ..... 110110 00000 0 0110 0111 ..... @r_r2 # FNOT2s
+
+ FPADD16 10 ..... 110110 ..... 0 0101 0000 ..... @r_r_r
+ FPADD16s 10 ..... 110110 ..... 0 0101 0001 ..... @r_r_r
+ FPADD32 10 ..... 110110 ..... 0 0101 0010 ..... @r_r_r
+ FPADD32s 10 ..... 110110 ..... 0 0101 0011 ..... @r_r_r
+ FPSUB16 10 ..... 110110 ..... 0 0101 0100 ..... @r_r_r
+ FPSUB16s 10 ..... 110110 ..... 0 0101 0101 ..... @r_r_r
+ FPSUB32 10 ..... 110110 ..... 0 0101 0110 ..... @r_r_r
+ FPSUB32s 10 ..... 110110 ..... 0 0101 0111 ..... @r_r_r
+
+ FNORd 10 ..... 110110 ..... 0 0110 0010 ..... @r_r_r
+ FNORs 10 ..... 110110 ..... 0 0110 0011 ..... @r_r_r
+ FANDNOTd 10 ..... 110110 ..... 0 0110 0100 ..... @r_r_r # FANDNOT2d
+ FANDNOTs 10 ..... 110110 ..... 0 0110 0101 ..... @r_r_r # FANDNOT2s
+ FANDNOTd 10 ..... 110110 ..... 0 0110 1000 ..... @r_r_r_swap # ... 1d
+ FANDNOTs 10 ..... 110110 ..... 0 0110 1001 ..... @r_r_r_swap # ... 1s
+ FXORd 10 ..... 110110 ..... 0 0110 1100 ..... @r_r_r
+ FXORs 10 ..... 110110 ..... 0 0110 1101 ..... @r_r_r
+ FNANDd 10 ..... 110110 ..... 0 0110 1110 ..... @r_r_r
+ FNANDs 10 ..... 110110 ..... 0 0110 1111 ..... @r_r_r
+ FANDd 10 ..... 110110 ..... 0 0111 0000 ..... @r_r_r
+ FANDs 10 ..... 110110 ..... 0 0111 0001 ..... @r_r_r
+ FXNORd 10 ..... 110110 ..... 0 0111 0010 ..... @r_r_r
+ FXNORs 10 ..... 110110 ..... 0 0111 0011 ..... @r_r_r
+ FORNOTd 10 ..... 110110 ..... 0 0111 0110 ..... @r_r_r # FORNOT2d
+ FORNOTs 10 ..... 110110 ..... 0 0111 0111 ..... @r_r_r # FORNOT2s
+ FORNOTd 10 ..... 110110 ..... 0 0111 1010 ..... @r_r_r_swap # ... 1d
+ FORNOTs 10 ..... 110110 ..... 0 0111 1011 ..... @r_r_r_swap # ... 1s
+ FORd 10 ..... 110110 ..... 0 0111 1100 ..... @r_r_r
+ FORs 10 ..... 110110 ..... 0 0111 1101 ..... @r_r_r
+
+ FZEROd 10 rd:5 110110 00000 0 0110 0000 00000
+ FZEROs 10 rd:5 110110 00000 0 0110 0001 00000
+ FONEd 10 rd:5 110110 00000 0 0111 1110 00000
+ FONEs 10 rd:5 110110 00000 0 0111 1111 00000
+ ]
+ NCP 10 ----- 110110 ----- --------- ----- # v8 CPop1
+}
+
+NCP 10 ----- 110111 ----- --------- ----- # v8 CPop2
+
+##
+## Major Opcode 11 -- load and store instructions
+##
+
+%dfp_rd 25:5 !function=extract_dfpreg
+%qfp_rd 25:5 !function=extract_qfpreg
+
+&r_r_ri_asi rd rs1 rs2_or_imm asi imm:bool
+@r_r_ri_na .. rd:5 ...... rs1:5 imm:1 rs2_or_imm:s13 &r_r_ri_asi asi=-1
+@d_r_ri_na .. ..... ...... rs1:5 imm:1 rs2_or_imm:s13 \
+ &r_r_ri_asi rd=%dfp_rd asi=-1
+@q_r_ri_na .. ..... ...... rs1:5 imm:1 rs2_or_imm:s13 \
+ &r_r_ri_asi rd=%qfp_rd asi=-1
+
+@r_r_r_asi .. rd:5 ...... rs1:5 0 asi:8 rs2_or_imm:5 &r_r_ri_asi imm=0
+@r_r_i_asi .. rd:5 ...... rs1:5 1 rs2_or_imm:s13 \
+ &r_r_ri_asi imm=1 asi=-2
+@d_r_r_asi .. ..... ...... rs1:5 0 asi:8 rs2_or_imm:5 \
+ &r_r_ri_asi rd=%dfp_rd imm=0
+@d_r_i_asi .. ..... ...... rs1:5 1 rs2_or_imm:s13 \
+ &r_r_ri_asi rd=%dfp_rd imm=1 asi=-2
+@q_r_r_asi .. ..... ...... rs1:5 0 asi:8 rs2_or_imm:5 \
+ &r_r_ri_asi rd=%qfp_rd imm=0
+@q_r_i_asi .. ..... ...... rs1:5 1 rs2_or_imm:s13 \
+ &r_r_ri_asi rd=%qfp_rd imm=1 asi=-2
+@casa_imm .. rd:5 ...... rs1:5 1 00000000 rs2_or_imm:5 \
+ &r_r_ri_asi imm=1 asi=-2
+
+LDUW 11 ..... 000000 ..... . ............. @r_r_ri_na
+LDUB 11 ..... 000001 ..... . ............. @r_r_ri_na
+LDUH 11 ..... 000010 ..... . ............. @r_r_ri_na
+LDD 11 ..... 000011 ..... . ............. @r_r_ri_na
+LDSW 11 ..... 001000 ..... . ............. @r_r_ri_na
+LDSB 11 ..... 001001 ..... . ............. @r_r_ri_na
+LDSH 11 ..... 001010 ..... . ............. @r_r_ri_na
+LDX 11 ..... 001011 ..... . ............. @r_r_ri_na
+
+STW 11 ..... 000100 ..... . ............. @r_r_ri_na
+STB 11 ..... 000101 ..... . ............. @r_r_ri_na
+STH 11 ..... 000110 ..... . ............. @r_r_ri_na
+STD 11 ..... 000111 ..... . ............. @r_r_ri_na
+STX 11 ..... 001110 ..... . ............. @r_r_ri_na
+
+LDUW 11 ..... 010000 ..... . ............. @r_r_r_asi # LDUWA
+LDUW 11 ..... 010000 ..... . ............. @r_r_i_asi # LDUWA
+LDUB 11 ..... 010001 ..... . ............. @r_r_r_asi # LDUBA
+LDUB 11 ..... 010001 ..... . ............. @r_r_i_asi # LDUBA
+LDUH 11 ..... 010010 ..... . ............. @r_r_r_asi # LDUHA
+LDUH 11 ..... 010010 ..... . ............. @r_r_i_asi # LDUHA
+LDD 11 ..... 010011 ..... . ............. @r_r_r_asi # LDDA
+LDD 11 ..... 010011 ..... . ............. @r_r_i_asi # LDDA
+LDX 11 ..... 011011 ..... . ............. @r_r_r_asi # LDXA
+LDX 11 ..... 011011 ..... . ............. @r_r_i_asi # LDXA
+LDSB 11 ..... 011001 ..... . ............. @r_r_r_asi # LDSBA
+LDSB 11 ..... 011001 ..... . ............. @r_r_i_asi # LDSBA
+LDSH 11 ..... 011010 ..... . ............. @r_r_r_asi # LDSHA
+LDSH 11 ..... 011010 ..... . ............. @r_r_i_asi # LDSHA
+LDSW 11 ..... 011000 ..... . ............. @r_r_r_asi # LDSWA
+LDSW 11 ..... 011000 ..... . ............. @r_r_i_asi # LDSWA
+
+STW 11 ..... 010100 ..... . ............. @r_r_r_asi # STWA
+STW 11 ..... 010100 ..... . ............. @r_r_i_asi # STWA
+STB 11 ..... 010101 ..... . ............. @r_r_r_asi # STBA
+STB 11 ..... 010101 ..... . ............. @r_r_i_asi # STBA
+STH 11 ..... 010110 ..... . ............. @r_r_r_asi # STHA
+STH 11 ..... 010110 ..... . ............. @r_r_i_asi # STHA
+STD 11 ..... 010111 ..... . ............. @r_r_r_asi # STDA
+STD 11 ..... 010111 ..... . ............. @r_r_i_asi # STDA
+STX 11 ..... 011110 ..... . ............. @r_r_r_asi # STXA
+STX 11 ..... 011110 ..... . ............. @r_r_i_asi # STXA
+
+LDF 11 ..... 100000 ..... . ............. @r_r_ri_na
+LDFSR 11 00000 100001 ..... . ............. @n_r_ri
+LDXFSR 11 00001 100001 ..... . ............. @n_r_ri
+LDQF 11 ..... 100010 ..... . ............. @q_r_ri_na
+LDDF 11 ..... 100011 ..... . ............. @d_r_ri_na
+
+STF 11 ..... 100100 ..... . ............. @r_r_ri_na
+STFSR 11 00000 100101 ..... . ............. @n_r_ri
+STXFSR 11 00001 100101 ..... . ............. @n_r_ri
+{
+ STQF 11 ..... 100110 ..... . ............. @q_r_ri_na
+ STDFQ 11 ----- 100110 ----- - -------------
+}
+STDF 11 ..... 100111 ..... . ............. @d_r_ri_na
+
+LDSTUB 11 ..... 001101 ..... . ............. @r_r_ri_na
+LDSTUB 11 ..... 011101 ..... . ............. @r_r_r_asi # LDSTUBA
+LDSTUB 11 ..... 011101 ..... . ............. @r_r_i_asi # LDSTUBA
+
+SWAP 11 ..... 001111 ..... . ............. @r_r_ri_na
+SWAP 11 ..... 011111 ..... . ............. @r_r_r_asi # SWAPA
+SWAP 11 ..... 011111 ..... . ............. @r_r_i_asi # SWAPA
+
+CASA 11 ..... 111100 ..... . ............. @r_r_r_asi
+CASA 11 ..... 111100 ..... . ............. @casa_imm
+CASXA 11 ..... 111110 ..... . ............. @r_r_r_asi
+CASXA 11 ..... 111110 ..... . ............. @casa_imm
+
+NOP_v9 11 ----- 101101 ----- 0 00000000 ----- # PREFETCH
+NOP_v9 11 ----- 101101 ----- 1 ------------- # PREFETCH
+NOP_v9 11 ----- 111101 ----- - ------------- # PREFETCHA
+
+{
+ [
+ LDFA 11 ..... 110000 ..... . ............. @r_r_r_asi
+ LDFA 11 ..... 110000 ..... . ............. @r_r_i_asi
+ ]
+ NCP 11 ----- 110000 ----- --------- ----- # v8 LDC
+}
+NCP 11 ----- 110001 ----- --------- ----- # v8 LDCSR
+LDQFA 11 ..... 110010 ..... . ............. @q_r_r_asi
+LDQFA 11 ..... 110010 ..... . ............. @q_r_i_asi
+{
+ [
+ LDDFA 11 ..... 110011 ..... . ............. @d_r_r_asi
+ LDDFA 11 ..... 110011 ..... . ............. @d_r_i_asi
+ ]
+ NCP 11 ----- 110011 ----- --------- ----- # v8 LDDC
+}
+
+{
+ [
+ STFA 11 ..... 110100 ..... . ............. @r_r_r_asi
+ STFA 11 ..... 110100 ..... . ............. @r_r_i_asi
+ ]
+ NCP 11 ----- 110100 ----- --------- ----- # v8 STC
+}
+NCP 11 ----- 110101 ----- --------- ----- # v8 STCSR
+{
+ [
+ STQFA 11 ..... 110110 ..... . ............. @q_r_r_asi
+ STQFA 11 ..... 110110 ..... . ............. @q_r_i_asi
+ ]
+ NCP 11 ----- 110110 ----- --------- ----- # v8 STDCQ
+}
+{
+ [
+ STDFA 11 ..... 110111 ..... . ............. @d_r_r_asi
+ STDFA 11 ..... 110111 ..... . ............. @d_r_i_asi
+ ]
+ NCP 11 ----- 110111 ----- --------- ----- # v8 STDC
+}
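
For readers new to decodetree: "&name" lines declare argument sets, "@name" lines declare reusable formats, "%name" lines declare computed fields, and a "{ ... }" group tries its members in order (first match wins) while "[ ... ]" marks a set of mutually exclusive patterns inside such a group. As a worked example, the compound field "%d16 20:s2 0:14" used by BPr concatenates a signed 2-bit piece at bit 20 with 14 bits at bit 0; the generated extractor looks roughly like this (the name is illustrative):

    /* Roughly what decodetree emits for %d16. */
    static int extract_d16(uint32_t insn)
    {
        int hi = sextract32(insn, 20, 2);   /* sign comes from the top piece */
        int lo = extract32(insn, 0, 14);
        return (hi << 14) | lo;             /* 16-bit signed displacement */
    }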
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index 78b03308ae..09066d5487 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -360,6 +360,7 @@ static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
#endif /* !CONFIG_USER_ONLY */
#endif
+#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
static void do_check_align(CPUSPARCState *env, target_ulong addr,
uint32_t align, uintptr_t ra)
{
@@ -367,11 +368,7 @@ static void do_check_align(CPUSPARCState *env, target_ulong addr,
cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
}
}
-
-void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
-{
- do_check_align(env, addr, align, GETPC());
-}
+#endif
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
defined(DEBUG_MXCC)
@@ -1653,7 +1650,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
env->dmmu.sun4v_tsb_pointers[idx] = val;
} else {
- helper_raise_exception(env, TT_ILL_INSN);
+ goto illegal_insn;
}
break;
case 0x33:
@@ -1665,7 +1662,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
*/
env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
} else {
- helper_raise_exception(env, TT_ILL_INSN);
+ goto illegal_insn;
}
break;
case 0x35:
@@ -1682,7 +1679,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
env->immu.sun4v_tsb_pointers[idx] = val;
} else {
- helper_raise_exception(env, TT_ILL_INSN);
+ goto illegal_insn;
}
break;
case 0x37:
@@ -1694,7 +1691,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
*/
env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
} else {
- helper_raise_exception(env, TT_ILL_INSN);
+ goto illegal_insn;
}
break;
case ASI_UPA_CONFIG: /* UPA config */
@@ -1923,6 +1920,8 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
default:
sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
return;
+ illegal_insn:
+ cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
}
}
#endif /* CONFIG_USER_ONLY */
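
Funnelling these cases through one label also changes how the exception unwinds: helper_raise_exception() delivers the trap with whatever pc the translator last synced, while cpu_raise_exception_ra() restores the exact guest pc from the helper's return address. The contrast, in brief (GETPC() must be evaluated in the outermost helper frame, which the shared label guarantees here):

    helper_raise_exception(env, TT_ILL_INSN);           /* uses last-synced pc */
    cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());  /* unwinds to this insn */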
diff --git a/target/sparc/meson.build b/target/sparc/meson.build
index 48025cce76..c316773db6 100644
--- a/target/sparc/meson.build
+++ b/target/sparc/meson.build
@@ -1,4 +1,7 @@
+gen = decodetree.process('insns.decode')
+
sparc_ss = ss.source_set()
+sparc_ss.add(gen)
sparc_ss.add(files(
'cc_helper.c',
'cpu.c',
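
decodetree.process() turns insns.decode into a generated header whose single entry point is called once per instruction word. A sketch of the call site (the translator loop itself is outside this patch's hunks):

    insn = translator_ldl(env, &dc->base, dc->pc);  /* fetch 4 bytes */
    if (!decode(dc, insn)) {                        /* generated decoder */
        gen_exception(dc, TT_ILL_INSN);             /* no pattern matched */
    }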
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index f92ff80ac8..986a88c4e1 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -25,9 +25,8 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
-
+#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
-
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"
@@ -36,6 +35,65 @@
#include "exec/helper-info.c.inc"
#undef HELPER_H
+#ifdef TARGET_SPARC64
+# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
+# define gen_helper_rett(E) qemu_build_not_reached()
+# define gen_helper_power_down(E) qemu_build_not_reached()
+# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
+#else
+# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
+# define gen_helper_done(E) qemu_build_not_reached()
+# define gen_helper_fabsd(D, S) qemu_build_not_reached()
+# define gen_helper_flushw(E) qemu_build_not_reached()
+# define gen_helper_fnegd(D, S) qemu_build_not_reached()
+# define gen_helper_rdccr(D, E) qemu_build_not_reached()
+# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
+# define gen_helper_restored(E) qemu_build_not_reached()
+# define gen_helper_retry(E) qemu_build_not_reached()
+# define gen_helper_saved(E) qemu_build_not_reached()
+# define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
+# define gen_helper_set_softint(E, S) qemu_build_not_reached()
+# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
+# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
+# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
+# define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
+# define gen_helper_wrccr(E, S) qemu_build_not_reached()
+# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
+# define gen_helper_wrgl(E, S) qemu_build_not_reached()
+# define gen_helper_write_softint(E, S) qemu_build_not_reached()
+# define gen_helper_wrpil(E, S) qemu_build_not_reached()
+# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
+# define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
+# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
+# define FSR_LDXFSR_MASK 0
+# define FSR_LDXFSR_OLDMASK 0
+# define MAXTL_MASK 0
+#endif
+
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
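
These stubs rely on qemu_build_not_reached(), which only compiles if the optimizer can prove the call dead; that turns any accidental use of a 64-bit-only helper in a 32-bit build (or vice versa) into a build error rather than a runtime assert. The mechanism is roughly (simplified from osdep.h):

    extern G_NORETURN void qemu_build_not_reached_always(void)
        QEMU_ERROR("code path is reachable");
    #define qemu_build_not_reached()  qemu_build_not_reached_always()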
@@ -53,21 +111,36 @@ static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
-#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
-#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
-static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
-static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
-static TCGv cpu_wim;
+# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
+# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
+#define env_field_offsetof(X) offsetof(CPUSPARCState, X)
+#ifdef TARGET_SPARC64
+# define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
+# define env64_field_offsetof(X) env_field_offsetof(X)
+#else
+# define env32_field_offsetof(X) env_field_offsetof(X)
+# define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
+#endif
+
+typedef struct DisasDelayException {
+ struct DisasDelayException *next;
+ TCGLabel *lab;
+ TCGv_i32 excp;
+ /* Saved state at parent insn. */
+ target_ulong pc;
+ target_ulong npc;
+} DisasDelayException;
+
typedef struct DisasContext {
DisasContextBase base;
target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
@@ -89,6 +162,7 @@ typedef struct DisasContext {
int fprs_dirty;
int asi;
#endif
+ DisasDelayException *delay_excp_list;
} DisasContext;
typedef struct {
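
The delay_excp_list collects exception paths that are emitted out of line; nothing in this hunk consumes it, but the expected consumer is the TB-end hook, roughly:

    /* Sketch: emit each deferred exception once, at the end of the TB,
     * with the pc/npc saved when the label was created. */
    for (DisasDelayException *e = dc->delay_excp_list; e; e = e->next) {
        gen_set_label(e->lab);
        tcg_gen_movi_tl(cpu_pc, e->pc);
        tcg_gen_movi_tl(cpu_npc, e->npc);
        gen_helper_raise_exception(tcg_env, e->excp);
    }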
@@ -119,12 +193,6 @@ typedef struct {
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
-static int sign_extend(int x, int len)
-{
- len = 32 - len;
- return (x << len) >> len;
-}
-
#define IS_IMM (insn & (1<<13))
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
@@ -209,69 +277,40 @@ static void gen_op_store_QT0_fpr(unsigned int dst)
offsetof(CPU_QuadU, ll.lower));
}
-static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
- TCGv_i64 v1, TCGv_i64 v2)
-{
- dst = QFPREG(dst);
-
- tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
- tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
- gen_update_fprs_dirty(dc, dst);
-}
-
-#ifdef TARGET_SPARC64
-static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
-{
- src = QFPREG(src);
- return cpu_fpr[src / 2];
-}
-
-static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
-{
- src = QFPREG(src);
- return cpu_fpr[src / 2 + 1];
-}
-
-static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
-{
- rd = QFPREG(rd);
- rs = QFPREG(rs);
-
- tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
- tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
- gen_update_fprs_dirty(dc, rd);
-}
-#endif
-
/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
-#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
-#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
+#define hypervisor(dc) 0
#endif
#endif
-#ifdef TARGET_SPARC64
-#ifndef TARGET_ABI32
-#define AM_CHECK(dc) ((dc)->address_mask_32bit)
+#if !defined(TARGET_SPARC64)
+# define AM_CHECK(dc) false
+#elif defined(TARGET_ABI32)
+# define AM_CHECK(dc) true
+#elif defined(CONFIG_USER_ONLY)
+# define AM_CHECK(dc) false
#else
-#define AM_CHECK(dc) (1)
-#endif
+# define AM_CHECK(dc) ((dc)->address_mask_32bit)
#endif
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
-#ifdef TARGET_SPARC64
- if (AM_CHECK(dc))
+ if (AM_CHECK(dc)) {
tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
-#endif
+ }
+}
+
+static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
+{
+ return AM_CHECK(dc) ? (uint32_t)addr : addr;
}
static TCGv gen_load_gpr(DisasContext *dc, int reg)
@@ -402,71 +441,89 @@ static TCGv_i32 gen_sub32_carry32(void)
return carry_32;
}
-static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
- TCGv src2, int update_cc)
+static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
+ TCGv_i32 carry_32, bool update_cc)
{
- TCGv_i32 carry_32;
- TCGv carry;
+ tcg_gen_add_tl(dst, src1, src2);
- switch (dc->cc_op) {
- case CC_OP_DIV:
- case CC_OP_LOGIC:
- /* Carry is known to be zero. Fall back to plain ADD. */
- if (update_cc) {
- gen_op_add_cc(dst, src1, src2);
- } else {
- tcg_gen_add_tl(dst, src1, src2);
- }
- return;
+#ifdef TARGET_SPARC64
+ TCGv carry = tcg_temp_new();
+ tcg_gen_extu_i32_tl(carry, carry_32);
+ tcg_gen_add_tl(dst, dst, carry);
+#else
+ tcg_gen_add_i32(dst, dst, carry_32);
+#endif
- case CC_OP_ADD:
- case CC_OP_TADD:
- case CC_OP_TADDTV:
- if (TARGET_LONG_BITS == 32) {
- /* We can re-use the host's hardware carry generation by using
- an ADD2 opcode. We discard the low part of the output.
- Ideally we'd combine this operation with the add that
- generated the carry in the first place. */
- carry = tcg_temp_new();
- tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
- goto add_done;
- }
- carry_32 = gen_add32_carry32();
- break;
+ if (update_cc) {
+ tcg_debug_assert(dst == cpu_cc_dst);
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ }
+}
- case CC_OP_SUB:
- case CC_OP_TSUB:
- case CC_OP_TSUBTV:
- carry_32 = gen_sub32_carry32();
- break;
+static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
+{
+ TCGv discard;
- default:
- /* We need external help to produce the carry. */
- carry_32 = tcg_temp_new_i32();
- gen_helper_compute_C_icc(carry_32, tcg_env);
- break;
+ if (TARGET_LONG_BITS == 64) {
+ gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
+ return;
}
-#if TARGET_LONG_BITS == 64
- carry = tcg_temp_new();
- tcg_gen_extu_i32_i64(carry, carry_32);
-#else
- carry = carry_32;
-#endif
-
- tcg_gen_add_tl(dst, src1, src2);
- tcg_gen_add_tl(dst, dst, carry);
+ /*
+ * We can re-use the host's hardware carry generation by using
+ * an ADD2 opcode. We discard the low part of the output.
+ * Ideally we'd combine this operation with the add that
+ * generated the carry in the first place.
+ */
+ discard = tcg_temp_new();
+ tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
- add_done:
if (update_cc) {
+ tcg_debug_assert(dst == cpu_cc_dst);
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
- dc->cc_op = CC_OP_ADDX;
}
}
+static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_addc_int_add(dst, src1, src2, false);
+}
+
+static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_addc_int_add(dst, src1, src2, true);
+}
+
+static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
+}
+
+static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
+}
+
+static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
+ bool update_cc)
+{
+ TCGv_i32 carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32, tcg_env);
+ gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
+}
+
+static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_addc_int_generic(dst, src1, src2, false);
+}
+
+static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_addc_int_generic(dst, src1, src2, true);
+}
+
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
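
The add2 form above deserves a gloss: tcg_gen_add2_tl(rl, rh, al, ah, bl, bh) computes the double-word sum {ah:al} + {bh:bl}. Here the low halves simply recompute cc_src + cc_src2 to regenerate its carry, which then flows into the high-half sum:

    /* dst = src1 + src2 + carry_out(cc_src + cc_src2); the low result
     * is thrown away, only its carry matters. */
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);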
@@ -475,51 +532,11 @@ static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
tcg_gen_mov_tl(dst, cpu_cc_dst);
}
-static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
- TCGv src2, int update_cc)
+static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
+ TCGv_i32 carry_32, bool update_cc)
{
- TCGv_i32 carry_32;
TCGv carry;
- switch (dc->cc_op) {
- case CC_OP_DIV:
- case CC_OP_LOGIC:
- /* Carry is known to be zero. Fall back to plain SUB. */
- if (update_cc) {
- gen_op_sub_cc(dst, src1, src2);
- } else {
- tcg_gen_sub_tl(dst, src1, src2);
- }
- return;
-
- case CC_OP_ADD:
- case CC_OP_TADD:
- case CC_OP_TADDTV:
- carry_32 = gen_add32_carry32();
- break;
-
- case CC_OP_SUB:
- case CC_OP_TSUB:
- case CC_OP_TSUBTV:
- if (TARGET_LONG_BITS == 32) {
- /* We can re-use the host's hardware carry generation by using
- a SUB2 opcode. We discard the low part of the output.
- Ideally we'd combine this operation with the add that
- generated the carry in the first place. */
- carry = tcg_temp_new();
- tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
- goto sub_done;
- }
- carry_32 = gen_sub32_carry32();
- break;
-
- default:
- /* We need external help to produce the carry. */
- carry_32 = tcg_temp_new_i32();
- gen_helper_compute_C_icc(carry_32, tcg_env);
- break;
- }
-
#if TARGET_LONG_BITS == 64
carry = tcg_temp_new();
tcg_gen_extu_i32_i64(carry, carry_32);
@@ -530,16 +547,75 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
tcg_gen_sub_tl(dst, src1, src2);
tcg_gen_sub_tl(dst, dst, carry);
- sub_done:
if (update_cc) {
+ tcg_debug_assert(dst == cpu_cc_dst);
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
- dc->cc_op = CC_OP_SUBX;
}
}
+static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
+}
+
+static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
+}
+
+static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
+{
+ TCGv discard;
+
+ if (TARGET_LONG_BITS == 64) {
+ gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
+ return;
+ }
+
+ /*
+ * We can re-use the host's hardware carry generation by using
+ * a SUB2 opcode. We discard the low part of the output.
+ */
+ discard = tcg_temp_new();
+ tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+
+ if (update_cc) {
+ tcg_debug_assert(dst == cpu_cc_dst);
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ }
+}
+
+static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subc_int_sub(dst, src1, src2, false);
+}
+
+static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subc_int_sub(dst, src1, src2, true);
+}
+
+static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
+ bool update_cc)
+{
+ TCGv_i32 carry_32 = tcg_temp_new_i32();
+
+ gen_helper_compute_C_icc(carry_32, tcg_env);
+ gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
+}
+
+static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subc_int_generic(dst, src1, src2, false);
+}
+
+static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_op_subc_int_generic(dst, src1, src2, true);
+}
+
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
TCGv r_temp, zero, t0;
@@ -616,6 +692,133 @@ static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
gen_op_multiply(dst, src1, src2, 1);
}
+static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_udivx(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_sdivx(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_udiv(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_sdiv(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_udiv_cc(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_taddcctv(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_tsubcctv(dst, tcg_env, src1, src2);
+}
+
+static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_ctpop_tl(dst, src2);
+}
+
+#ifndef TARGET_SPARC64
+static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
+{
+ g_assert_not_reached();
+}
+#endif
+
+static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_array8(dst, src1, src2);
+ tcg_gen_shli_tl(dst, dst, 1);
+}
+
+static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_array8(dst, src1, src2);
+ tcg_gen_shli_tl(dst, dst, 2);
+}
+
+static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
+{
+#ifdef TARGET_SPARC64
+ gen_helper_fpack16(dst, cpu_gsr, src);
+#else
+ g_assert_not_reached();
+#endif
+}
+
+static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
+{
+#ifdef TARGET_SPARC64
+ gen_helper_fpackfix(dst, cpu_gsr, src);
+#else
+ g_assert_not_reached();
+#endif
+}
+
+static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
+{
+#ifdef TARGET_SPARC64
+ gen_helper_fpack32(dst, cpu_gsr, src1, src2);
+#else
+ g_assert_not_reached();
+#endif
+}
+
+static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
+{
+#ifdef TARGET_SPARC64
+ TCGv t1, t2, shift;
+
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ shift = tcg_temp_new();
+
+ tcg_gen_andi_tl(shift, cpu_gsr, 7);
+ tcg_gen_shli_tl(shift, shift, 3);
+ tcg_gen_shl_tl(t1, s1, shift);
+
+ /*
+ * A shift of 64 does not produce 0 in TCG. Divide this into a
+ * shift of (up to 63) followed by a constant shift of 1.
+ */
+ tcg_gen_xori_tl(shift, shift, 63);
+ tcg_gen_shr_tl(t2, s2, shift);
+ tcg_gen_shri_tl(t2, t2, 1);
+
+ tcg_gen_or_tl(dst, t1, t2);
+#else
+ g_assert_not_reached();
+#endif
+}
+
+static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
+{
+#ifdef TARGET_SPARC64
+ gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
+#else
+ g_assert_not_reached();
+#endif
+}
+
// 1
static void gen_op_eval_ba(TCGv dst)
{
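
A worked example of the faligndata shift split above, for GSR.align == 3 (bit shift 24):

    /* want:    dst = (s1 << 24) | (s2 >> 40)
     * emitted: t2 = (s2 >> (24 ^ 63)) >> 1 = (s2 >> 39) >> 1 = s2 >> 40
     * For align == 0 this becomes (s2 >> 63) >> 1 == 0, the correct
     * result, where a single TCG shift by 64 would be undefined. */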
@@ -884,47 +1087,6 @@ static void gen_branch2(DisasContext *dc, target_ulong pc1,
gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
-static void gen_branch_a(DisasContext *dc, target_ulong pc1)
-{
- TCGLabel *l1 = gen_new_label();
- target_ulong npc = dc->npc;
-
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
-
- gen_goto_tb(dc, 0, npc, pc1);
-
- gen_set_label(l1);
- gen_goto_tb(dc, 1, npc + 4, npc + 8);
-
- dc->base.is_jmp = DISAS_NORETURN;
-}
-
-static void gen_branch_n(DisasContext *dc, target_ulong pc1)
-{
- target_ulong npc = dc->npc;
-
- if (npc & 3) {
- switch (npc) {
- case DYNAMIC_PC:
- case DYNAMIC_PC_LOOKUP:
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
- tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
- tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc,
- cpu_cond, tcg_constant_tl(0),
- tcg_constant_tl(pc1), cpu_npc);
- dc->pc = npc;
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- dc->pc = npc;
- dc->jump_pc[0] = pc1;
- dc->jump_pc[1] = npc + 4;
- dc->npc = JUMP_PC;
- }
-}
-
static void gen_generic_branch(DisasContext *dc)
{
TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
@@ -984,9 +1146,38 @@ static void gen_exception(DisasContext *dc, int which)
dc->base.is_jmp = DISAS_NORETURN;
}
-static void gen_check_align(TCGv addr, int mask)
+static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
- gen_helper_check_align(tcg_env, addr, tcg_constant_i32(mask));
+ DisasDelayException *e = g_new0(DisasDelayException, 1);
+
+ e->next = dc->delay_excp_list;
+ dc->delay_excp_list = e;
+
+ e->lab = gen_new_label();
+ e->excp = excp;
+ e->pc = dc->pc;
+ /* Caller must have used flush_cond before branch. */
+ assert(dc->npc != JUMP_PC);
+ e->npc = dc->npc;
+
+ return e->lab;
+}
+
+static TCGLabel *delay_exception(DisasContext *dc, int excp)
+{
+ return delay_exceptionv(dc, tcg_constant_i32(excp));
+}
+
+static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
+{
+ TCGv t = tcg_temp_new();
+ TCGLabel *lab;
+
+ tcg_gen_andi_tl(t, addr, mask);
+
+ flush_cond(dc);
+ lab = delay_exception(dc, TT_UNALIGNED);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
static void gen_mov_pc_npc(DisasContext *dc)
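
With this in place an alignment check costs one AND and one conditional branch on the fast path, and the trap code is shared at the end of the TB. A typical call site looks like (sketch; the call sites are in later hunks):

    gen_check_align(dc, addr, 3);   /* JMPL target must be 4-byte aligned */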
@@ -1264,41 +1455,13 @@ static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
}
}
-static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
- DisasContext *dc)
-{
- DisasCompare cmp;
- gen_compare(&cmp, cc, cond, dc);
-
- /* The interface is to return a boolean in r_dst. */
- if (cmp.is_bool) {
- tcg_gen_mov_tl(r_dst, cmp.c1);
- } else {
- tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
- }
-}
-
-static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
-{
- DisasCompare cmp;
- gen_fcompare(&cmp, cc, cond);
-
- /* The interface is to return a boolean in r_dst. */
- if (cmp.is_bool) {
- tcg_gen_mov_tl(r_dst, cmp.c1);
- } else {
- tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
- }
-}
-
-#ifdef TARGET_SPARC64
// Inverted logic
-static const int gen_tcg_cond_reg[8] = {
- -1,
+static const TCGCond gen_tcg_cond_reg[8] = {
+ TCG_COND_NEVER, /* reserved */
TCG_COND_NE,
TCG_COND_GT,
TCG_COND_GE,
- -1,
+ TCG_COND_NEVER, /* reserved */
TCG_COND_EQ,
TCG_COND_LE,
TCG_COND_LT,
@@ -1312,115 +1475,48 @@ static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
cmp->c2 = tcg_constant_tl(0);
}
-static void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
+static void gen_op_clear_ieee_excp_and_FTT(void)
{
- DisasCompare cmp;
- gen_compare_reg(&cmp, cond, r_src);
-
- /* The interface is to return a boolean in r_dst. */
- tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
-#endif
-static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
- unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
- target_ulong target = dc->pc + offset;
+ gen_op_clear_ieee_excp_and_FTT();
+ tcg_gen_mov_i32(dst, src);
+}
-#ifdef TARGET_SPARC64
- if (unlikely(AM_CHECK(dc))) {
- target &= 0xffffffffULL;
- }
-#endif
- if (cond == 0x0) {
- /* unconditional not taken */
- if (a) {
- dc->pc = dc->npc + 4;
- dc->npc = dc->pc + 4;
- } else {
- dc->pc = dc->npc;
- dc->npc = dc->pc + 4;
- }
- } else if (cond == 0x8) {
- /* unconditional taken */
- if (a) {
- dc->pc = target;
- dc->npc = dc->pc + 4;
- } else {
- dc->pc = dc->npc;
- dc->npc = target;
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
- }
- } else {
- flush_cond(dc);
- gen_cond(cpu_cond, cc, cond, dc);
- if (a) {
- gen_branch_a(dc, target);
- } else {
- gen_branch_n(dc, target);
- }
- }
+static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fnegs(dst, src);
}
-static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
- unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
- target_ulong target = dc->pc + offset;
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fabss(dst, src);
+}
-#ifdef TARGET_SPARC64
- if (unlikely(AM_CHECK(dc))) {
- target &= 0xffffffffULL;
- }
-#endif
- if (cond == 0x0) {
- /* unconditional not taken */
- if (a) {
- dc->pc = dc->npc + 4;
- dc->npc = dc->pc + 4;
- } else {
- dc->pc = dc->npc;
- dc->npc = dc->pc + 4;
- }
- } else if (cond == 0x8) {
- /* unconditional taken */
- if (a) {
- dc->pc = target;
- dc->npc = dc->pc + 4;
- } else {
- dc->pc = dc->npc;
- dc->npc = target;
- tcg_gen_mov_tl(cpu_pc, cpu_npc);
- }
- } else {
- flush_cond(dc);
- gen_fcond(cpu_cond, cc, cond);
- if (a) {
- gen_branch_a(dc, target);
- } else {
- gen_branch_n(dc, target);
- }
- }
+static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ tcg_gen_mov_i64(dst, src);
}
-#ifdef TARGET_SPARC64
-static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
- TCGv r_reg)
+static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
- unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
- target_ulong target = dc->pc + offset;
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fnegd(dst, src);
+}
- if (unlikely(AM_CHECK(dc))) {
- target &= 0xffffffffULL;
- }
- flush_cond(dc);
- gen_cond_reg(cpu_cond, cond, r_reg);
- if (a) {
- gen_branch_a(dc, target);
- } else {
- gen_branch_n(dc, target);
- }
+static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
+{
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_helper_fabsd(dst, src);
}
+#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
switch (fccno) {
@@ -1580,343 +1676,7 @@ static int gen_trap_ifnofpu(DisasContext *dc)
return 0;
}
-static void gen_op_clear_ieee_excp_and_FTT(void)
-{
- tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
-}
-
-static void gen_fop_FF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
-{
- TCGv_i32 dst, src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_i32))
-{
- TCGv_i32 dst, src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, src);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 dst, src1, src2;
-
- src1 = gen_load_fpr_F(dc, rs1);
- src2 = gen_load_fpr_F(dc, rs2);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 dst, src1, src2;
-
- src1 = gen_load_fpr_F(dc, rs1);
- src2 = gen_load_fpr_F(dc, rs2);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, src1, src2);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-#endif
-
-static void gen_fop_DD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
-{
- TCGv_i64 dst, src;
-
- src = gen_load_fpr_D(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src;
-
- src = gen_load_fpr_D(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, src);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-#endif
-
-static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, src1, src2);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, cpu_gsr, src1, src2);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 dst, src0, src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
- src0 = gen_load_fpr_D(dc, rd);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, src0, src1, src2);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-#endif
-
-static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr))
-{
- gen_op_load_fpr_QT1(QFPREG(rs));
-
- gen(tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-#ifdef TARGET_SPARC64
-static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr))
-{
- gen_op_load_fpr_QT1(QFPREG(rs));
-
- gen(tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-#endif
-
-static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_ptr))
-{
- gen_op_load_fpr_QT0(QFPREG(rs1));
- gen_op_load_fpr_QT1(QFPREG(rs2));
-
- gen(tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
-{
- TCGv_i64 dst;
- TCGv_i32 src1, src2;
-
- src1 = gen_load_fpr_F(dc, rs1);
- src2 = gen_load_fpr_F(dc, rs2);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
- void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
-{
- TCGv_i64 src1, src2;
-
- src1 = gen_load_fpr_D(dc, rs1);
- src2 = gen_load_fpr_D(dc, rs2);
-
- gen(tcg_env, src1, src2);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-#ifdef TARGET_SPARC64
-static void gen_fop_DF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
-{
- TCGv_i64 dst;
- TCGv_i32 src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-#endif
-
-static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
-{
- TCGv_i64 dst;
- TCGv_i32 src;
-
- src = gen_load_fpr_F(dc, rs);
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env, src);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_fop_FD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
-{
- TCGv_i32 dst;
- TCGv_i64 src;
-
- src = gen_load_fpr_D(dc, rs);
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env, src);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i32, TCGv_ptr))
-{
- TCGv_i32 dst;
-
- gen_op_load_fpr_QT1(QFPREG(rs));
- dst = gen_dest_fpr_F(dc);
-
- gen(dst, tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_F(dc, rd, dst);
-}
-
-static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_i64, TCGv_ptr))
-{
- TCGv_i64 dst;
-
- gen_op_load_fpr_QT1(QFPREG(rs));
- dst = gen_dest_fpr_D(dc, rd);
-
- gen(dst, tcg_env);
- gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
-
- gen_store_fpr_D(dc, rd, dst);
-}
-
-static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr, TCGv_i32))
-{
- TCGv_i32 src;
-
- src = gen_load_fpr_F(dc, rs);
-
- gen(tcg_env, src);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
- void (*gen)(TCGv_ptr, TCGv_i64))
-{
- TCGv_i64 src;
-
- src = gen_load_fpr_D(dc, rs);
-
- gen(tcg_env, src);
-
- gen_op_store_QT0_fpr(QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
-}
-
-static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
- TCGv addr, int mmu_idx, MemOp memop)
-{
- gen_address_mask(dc, addr);
- tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
-}
-
-static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
-{
- TCGv m1 = tcg_constant_tl(0xff);
- gen_address_mask(dc, addr);
- tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
-}
-
/* asi moves */
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
GET_ASI_HELPER,
GET_ASI_EXCP,
@@ -1935,15 +1695,25 @@ typedef struct {
MemOp memop;
} DisasASI;
-static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
+/*
+ * Build DisasASI.
+ * For asi == -1, treat as non-asi.
+ * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
+ */
+static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
- int asi = GET_FIELD(insn, 19, 26);
ASIType type = GET_ASI_HELPER;
int mem_idx = dc->mem_idx;
+ if (asi == -1) {
+ /* Artificial "non-asi" case. */
+ type = GET_ASI_DIRECT;
+ goto done;
+ }
+
#ifndef TARGET_SPARC64
/* Before v9, all asis are immediate and privileged. */
- if (IS_IMM) {
+ if (asi < 0) {
gen_exception(dc, TT_ILL_INSN);
type = GET_ASI_EXCP;
} else if (supervisor(dc)
@@ -1986,7 +1756,7 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
type = GET_ASI_EXCP;
}
#else
- if (IS_IMM) {
+ if (asi < 0) {
asi = dc->asi;
}
/* With v9, all asis below 0x80 are privileged. */
@@ -2145,28 +1915,39 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
}
#endif
+ done:
return (DisasASI){ type, asi, mem_idx, memop };
}
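
The -1/-2 sentinel convention above replaces the old IS_IMM test on the raw instruction word. A minimal standalone sketch of the dispatch, assuming illustrative constant names (ASI_NONE and ASI_IMM are stand-ins, not QEMU identifiers):

#include <stdio.h>

enum { ASI_NONE = -1, ASI_IMM = -2 };

static const char *classify(int asi, int dc_asi)
{
    if (asi == ASI_NONE) {
        return "direct access, no ASI override";
    }
    if (asi < 0) {            /* ASI_IMM: v8 traps here, v9 reads %asi */
        asi = dc_asi;
    }
    return asi < 0x80 ? "privileged ASI" : "unprivileged ASI";
}

int main(void)
{
    printf("%s\n", classify(ASI_NONE, 0x82));  /* direct */
    printf("%s\n", classify(ASI_IMM, 0x82));   /* unprivileged via %asi */
    printf("%s\n", classify(0x04, 0x82));      /* privileged */
    return 0;
}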
-static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
- int insn, MemOp memop)
+#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
+ TCGv_i32 asi, TCGv_i32 mop)
+{
+ g_assert_not_reached();
+}
+
+static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
+ TCGv_i32 asi, TCGv_i32 mop)
{
- DisasASI da = get_asi(dc, insn, memop);
+ g_assert_not_reached();
+}
+#endif
- switch (da.type) {
+static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
+{
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DTWINX: /* Reserved for ldda. */
gen_exception(dc, TT_ILL_INSN);
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
break;
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
save_state(dc);
#ifdef TARGET_SPARC64
@@ -2183,34 +1964,30 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
}
}
-static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
- int insn, MemOp memop)
+static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, memop);
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
+
case GET_ASI_DTWINX: /* Reserved for stda. */
-#ifndef TARGET_SPARC64
- gen_exception(dc, TT_ILL_INSN);
- break;
-#else
- if (!(dc->def->features & CPU_FEATURE_HYPV)) {
+ if (TARGET_LONG_BITS == 32) {
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
/* Pre OpenSPARC CPUs don't have these */
gen_exception(dc, TT_ILL_INSN);
- return;
+ break;
}
- /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
- * are ST_BLKINIT_ ASIs */
-#endif
+ /* In OpenSPARC T1+ CPUs, TWINX ASIs in store instructions are ST_BLKINIT_ ASIs */
/* fall through */
+
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
break;
-#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+
case GET_ASI_BCOPY:
+ assert(TARGET_LONG_BITS == 32);
/* Copy 32 bytes from the address in SRC to ADDR. */
/* ??? The original qemu code suggests 4-byte alignment, dropping
the low bits, but the only place I can see this used is in the
@@ -2228,18 +2005,18 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
for (i = 0; i < 32; i += 4) {
/* Since the loads and stores are paired, allow the
copy to happen in the host endianness. */
- tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
- tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
+ tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
+ tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
tcg_gen_add_tl(saddr, saddr, four);
tcg_gen_add_tl(daddr, daddr, four);
}
}
break;
-#endif
+
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
save_state(dc);
#ifdef TARGET_SPARC64
@@ -2259,16 +2036,15 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
}
}
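
The GET_ASI_BCOPY case above moves 32 bytes through paired 4-byte loads and stores; because each load feeds an identical store, host endianness cancels out. A plain-C model of that loop (a sketch, not patch code):

#include <stdint.h>

/* Copy 32 bytes as eight paired 4-byte accesses, as in GET_ASI_BCOPY. */
static void asi_bcopy32(uint32_t *dst, const uint32_t *src)
{
    for (int i = 0; i < 8; i++) {
        dst[i] = src[i];
    }
}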
-static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
- TCGv addr, int insn)
+static void gen_swap_asi(DisasContext *dc, DisasASI *da,
+ TCGv dst, TCGv src, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, MO_TEUL);
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+ tcg_gen_atomic_xchg_tl(dst, addr, src,
+ da->mem_idx, da->memop | MO_ALIGN);
break;
default:
/* ??? Should be DAE_invalid_asi. */
@@ -2277,20 +2053,15 @@ static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
}
}
-static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
- int insn, int rd)
+static void gen_cas_asi(DisasContext *dc, DisasASI *da,
+ TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, MO_TEUL);
- TCGv oldv;
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
return;
case GET_ASI_DIRECT:
- oldv = tcg_temp_new();
- tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop | MO_ALIGN);
- gen_store_gpr(dc, rd, oldv);
+ tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
+ da->mem_idx, da->memop | MO_ALIGN);
break;
default:
/* ??? Should be DAE_invalid_asi. */
@@ -2299,15 +2070,14 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
}
}
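
For reference, CASA's GET_ASI_DIRECT case above maps directly onto a compare-and-swap that always yields the old memory value. A standalone C11 sketch of those semantics:

#include <stdatomic.h>
#include <stdint.h>

/* Compare memory with cmpv; store newv on match; return the old value
 * unconditionally, as the cmpxchg above does for rd. */
static uint32_t casa(_Atomic uint32_t *p, uint32_t cmpv, uint32_t newv)
{
    uint32_t oldv = cmpv;
    atomic_compare_exchange_strong(p, &oldv, newv);
    return oldv;
}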
-static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
- DisasASI da = get_asi(dc, insn, MO_UB);
-
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_ldstub(dc, dst, addr, da.mem_idx);
+ tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
+ da->mem_idx, MO_UB);
break;
default:
/* ??? In theory, this should be raise DAE_invalid_asi.
@@ -2315,7 +2085,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
gen_helper_exit_atomic(tcg_env);
} else {
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
TCGv_i64 s64, t64;
@@ -2334,38 +2104,44 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
break;
}
}
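
LDSTUB, as rewritten inline above, is just an atomic exchange with 0xff. A runnable C11 sketch of the semantics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t ldstub(_Atomic uint8_t *p)
{
    return atomic_exchange(p, 0xff);    /* read old byte, set to all-ones */
}

int main(void)
{
    _Atomic uint8_t lock = 0;
    unsigned first = ldstub(&lock);
    unsigned second = ldstub(&lock);
    printf("%u then %u\n", first, second);  /* 0 then 255 */
    return 0;
}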
-#endif
-#ifdef TARGET_SPARC64
-static void gen_ldf_asi(DisasContext *dc, TCGv addr,
- int insn, int size, int rd)
+static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
+ TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+ MemOp memop = da->memop;
+ MemOp size = memop & MO_SIZE;
TCGv_i32 d32;
TCGv_i64 d64;
+ TCGv addr_tmp;
+
+ /* TODO: Use 128-bit load/store below. */
+ if (size == MO_128) {
+ memop = (memop & ~MO_SIZE) | MO_64;
+ }
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
+ memop |= MO_ALIGN_4;
switch (size) {
- case 4:
+ case MO_32:
d32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
gen_store_fpr_F(dc, rd, d32);
break;
- case 8:
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+
+ case MO_64:
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
break;
- case 16:
+
+ case MO_128:
d64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+ tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
+ addr_tmp = tcg_temp_new();
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
@@ -2375,24 +2151,17 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
case GET_ASI_BLOCK:
/* Valid for lddfa on aligned registers only. */
- if (size == 8 && (rd & 7) == 0) {
- MemOp memop;
- TCGv eight;
- int i;
-
- gen_address_mask(dc, addr);
-
+ if (orig_size == MO_64 && (rd & 7) == 0) {
/* The first operation checks required alignment. */
- memop = da.memop | MO_ALIGN_64;
- eight = tcg_constant_tl(8);
- for (i = 0; ; ++i) {
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
- da.mem_idx, memop);
+ addr_tmp = tcg_temp_new();
+ for (int i = 0; ; ++i) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+ memop | (i == 0 ? MO_ALIGN_64 : 0));
if (i == 7) {
break;
}
- tcg_gen_add_tl(addr, addr, eight);
- memop = da.memop;
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ addr = addr_tmp;
}
} else {
gen_exception(dc, TT_ILL_INSN);
@@ -2401,10 +2170,9 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
case GET_ASI_SHORT:
/* Valid for lddfa only. */
- if (size == 8) {
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN);
+ if (orig_size == MO_64) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
@@ -2412,8 +2180,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
save_state(dc);
/* According to the table in the UA2011 manual, the only
@@ -2421,21 +2189,24 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
the NO_FAULT asis. We still need a helper for these,
but we can just use the integer asi helper for them. */
switch (size) {
- case 4:
+ case MO_32:
d64 = tcg_temp_new_i64();
gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
d32 = gen_dest_fpr_F(dc);
tcg_gen_extrl_i64_i32(d32, d64);
gen_store_fpr_F(dc, rd, d32);
break;
- case 8:
- gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
+ case MO_64:
+ gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
+ r_asi, r_mop);
break;
- case 16:
+ case MO_128:
d64 = tcg_temp_new_i64();
gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
- tcg_gen_addi_tl(addr, addr, 8);
- gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
+ addr_tmp = tcg_temp_new();
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
+ r_asi, r_mop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
@@ -2446,37 +2217,45 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
}
}
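
The GET_ASI_BLOCK loop above applies MO_ALIGN_64 only to the first of the eight accesses and advances the address through a fresh temporary. A plain-C model of that control flow, with a boolean standing in for the alignment trap:

#include <stdbool.h>
#include <stdint.h>

/* Load a 64-byte block as eight 8-byte words; only the first access
 * carries the 64-byte alignment check, mirroring the loop above. */
static bool block_load64(uint64_t regs[8], const uint64_t *mem,
                         uint64_t addr)
{
    if (addr & 63) {
        return false;               /* stands in for the alignment trap */
    }
    for (int i = 0; i < 8; i++) {
        regs[i] = mem[addr / 8 + i];
    }
    return true;
}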
-static void gen_stf_asi(DisasContext *dc, TCGv addr,
- int insn, int size, int rd)
+static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
+ TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+ MemOp memop = da->memop;
+ MemOp size = memop & MO_SIZE;
TCGv_i32 d32;
+ TCGv addr_tmp;
+
+ /* TODO: Use 128-bit load/store below. */
+ if (size == MO_128) {
+ memop = (memop & ~MO_SIZE) | MO_64;
+ }
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
+ memop |= MO_ALIGN_4;
switch (size) {
- case 4:
+ case MO_32:
d32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
break;
- case 8:
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+ case MO_64:
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN_4);
break;
- case 16:
+ case MO_128:
/* Only 4-byte alignment required. However, it is legal for the
cpu to signal the alignment fault, and the OS trap handler is
required to fix it up. Requiring 16-byte alignment here avoids
having to probe the second page before performing the first
write. */
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN_16);
+ addr_tmp = tcg_temp_new();
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
break;
default:
g_assert_not_reached();
@@ -2485,24 +2264,17 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
case GET_ASI_BLOCK:
/* Valid for stdfa on aligned registers only. */
- if (size == 8 && (rd & 7) == 0) {
- MemOp memop;
- TCGv eight;
- int i;
-
- gen_address_mask(dc, addr);
-
+ if (orig_size == MO_64 && (rd & 7) == 0) {
/* The first operation checks required alignment. */
- memop = da.memop | MO_ALIGN_64;
- eight = tcg_constant_tl(8);
- for (i = 0; ; ++i) {
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
- da.mem_idx, memop);
+ addr_tmp = tcg_temp_new();
+ for (int i = 0; ; ++i) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+ memop | (i == 0 ? MO_ALIGN_64 : 0));
if (i == 7) {
break;
}
- tcg_gen_add_tl(addr, addr, eight);
- memop = da.memop;
+ tcg_gen_addi_tl(addr_tmp, addr, 8);
+ addr = addr_tmp;
}
} else {
gen_exception(dc, TT_ILL_INSN);
@@ -2511,10 +2283,9 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
case GET_ASI_SHORT:
/* Valid for stdfa only. */
- if (size == 8) {
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN);
+ if (orig_size == MO_64) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
@@ -2529,37 +2300,51 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
}
}
-static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv_i64 hi = gen_dest_gpr(dc, rd);
- TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
+ TCGv hi = gen_dest_gpr(dc, rd);
+ TCGv lo = gen_dest_gpr(dc, rd + 1);
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
return;
case GET_ASI_DTWINX:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
+#ifdef TARGET_SPARC64
+ {
+ MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
+ /*
+ * Note that LE twinx acts as if each 64-bit register result is
+ * byte swapped. We perform one 128-bit LE load, so must swap
+ * the order of the writebacks.
+ */
+ if ((mop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i128_i64(lo, hi, t);
+ } else {
+ tcg_gen_extr_i128_i64(hi, lo, t);
+ }
+ }
break;
+#else
+ g_assert_not_reached();
+#endif
case GET_ASI_DIRECT:
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
/* Note that LE ldda acts as if each 32-bit register
result is byte swapped. Having just performed one
64-bit bswap, we need now to swap the writebacks. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_extr32_i64(lo, hi, tmp);
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
} else {
- tcg_gen_extr32_i64(hi, lo, tmp);
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
}
}
break;
@@ -2570,18 +2355,18 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
real hardware allows others. This can be seen with e.g.
FreeBSD 10.3 wrt ASI_IC_TAG. */
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop);
TCGv_i64 tmp = tcg_temp_new_i64();
save_state(dc);
gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
/* See above. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_extr32_i64(lo, hi, tmp);
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
} else {
- tcg_gen_extr32_i64(hi, lo, tmp);
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
}
}
break;
@@ -2591,157 +2376,90 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
gen_store_gpr(dc, rd + 1, lo);
}
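
The DTWINX path now issues one 128-bit load and, as the comment notes, must swap the 64-bit writebacks for little-endian ASIs. A standalone model of that extraction (the big_endian flag stands in for the MO_BSWAP test):

#include <stdint.h>

/* Split a 128-bit value, loaded as {first, second} from memory, into
 * the rd/rd+1 register pair; LE twinx swaps the writeback order. */
static void twinx_writeback(uint64_t first, uint64_t second,
                            int big_endian, uint64_t *hi, uint64_t *lo)
{
    if (big_endian) {
        *hi = first;
        *lo = second;
    } else {
        *hi = second;
        *lo = first;
    }
}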
-static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
- int insn, int rd)
+static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
+ TCGv hi = gen_load_gpr(dc, rd);
TCGv lo = gen_load_gpr(dc, rd + 1);
- switch (da.type) {
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DTWINX:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
- tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
- break;
-
- case GET_ASI_DIRECT:
+#ifdef TARGET_SPARC64
{
- TCGv_i64 t64 = tcg_temp_new_i64();
-
- /* Note that LE stda acts as if each 32-bit register result is
- byte swapped. We will perform one 64-bit LE store, so now
- we must swap the order of the construction. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_concat32_i64(t64, lo, hi);
+ MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
+ TCGv_i128 t = tcg_temp_new_i128();
+
+ /*
+ * Note that LE twinx acts as if each 64-bit register result is
+ * byte swapped. We perform one 128-bit LE store, so must swap
+ * the order of the construction.
+ */
+ if ((mop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_i64_i128(t, lo, hi);
} else {
- tcg_gen_concat32_i64(t64, hi, lo);
+ tcg_gen_concat_i64_i128(t, hi, lo);
}
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
}
break;
+#else
+ g_assert_not_reached();
+#endif
- default:
- /* ??? In theory we've handled all of the ASIs that are valid
- for stda, and this should raise DAE_invalid_asi. */
+ case GET_ASI_DIRECT:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop);
TCGv_i64 t64 = tcg_temp_new_i64();
- /* See above. */
- if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_concat32_i64(t64, lo, hi);
+ /* Note that LE stda acts as if each 32-bit register result is
+ byte swapped. We will perform one 64-bit LE store, so now
+ we must swap the order of the construction. */
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_tl_i64(t64, lo, hi);
} else {
- tcg_gen_concat32_i64(t64, hi, lo);
+ tcg_gen_concat_tl_i64(t64, hi, lo);
}
-
- save_state(dc);
- gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
+ tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
}
break;
- }
-}
-static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
- int insn, int rd)
-{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv oldv;
-
- switch (da.type) {
- case GET_ASI_EXCP:
- return;
- case GET_ASI_DIRECT:
- oldv = tcg_temp_new();
- tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop | MO_ALIGN);
- gen_store_gpr(dc, rd, oldv);
- break;
- default:
- /* ??? Should be DAE_invalid_asi. */
- gen_exception(dc, TT_DATA_ACCESS);
- break;
- }
-}
-
-#elif !defined(CONFIG_USER_ONLY)
-static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
-{
- /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
- whereby "rd + 1" elicits "error: array subscript is above array".
- Since we have already asserted that rd is even, the semantics
- are unchanged. */
- TCGv lo = gen_dest_gpr(dc, rd | 1);
- TCGv hi = gen_dest_gpr(dc, rd);
- TCGv_i64 t64 = tcg_temp_new_i64();
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
-
- switch (da.type) {
- case GET_ASI_EXCP:
- return;
- case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
- break;
- default:
- {
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
-
- save_state(dc);
- gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
- }
- break;
- }
-
- tcg_gen_extr_i64_i32(lo, hi, t64);
- gen_store_gpr(dc, rd | 1, lo);
- gen_store_gpr(dc, rd, hi);
-}
-
-static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
- int insn, int rd)
-{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv lo = gen_load_gpr(dc, rd + 1);
- TCGv_i64 t64 = tcg_temp_new_i64();
-
- tcg_gen_concat_tl_i64(t64, lo, hi);
-
- switch (da.type) {
- case GET_ASI_EXCP:
- break;
- case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
- break;
case GET_ASI_BFILL:
+ assert(TARGET_LONG_BITS == 32);
/* Store 32 bytes of T64 to ADDR. */
/* ??? The original qemu code suggests 8-byte alignment, dropping
the low bits, but the only place I can see this used is in the
Linux kernel with 32 byte alignment, which would make more sense
as a cacheline-style operation. */
{
+ TCGv_i64 t64 = tcg_temp_new_i64();
TCGv d_addr = tcg_temp_new();
TCGv eight = tcg_constant_tl(8);
int i;
+ tcg_gen_concat_tl_i64(t64, lo, hi);
tcg_gen_andi_tl(d_addr, addr, -8);
for (i = 0; i < 32; i += 8) {
- tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
tcg_gen_add_tl(d_addr, d_addr, eight);
}
}
break;
+
default:
+ /* ??? In theory we've handled all of the ASIs that are valid
+ for stda, and this should raise DAE_invalid_asi. */
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(da->memop);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /* See above. */
+ if ((da->memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat_tl_i64(t64, lo, hi);
+ } else {
+ tcg_gen_concat_tl_i64(t64, hi, lo);
+ }
save_state(dc);
gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
@@ -2749,30 +2467,10 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
break;
}
}
-#endif
-static TCGv get_src1(DisasContext *dc, unsigned int insn)
-{
- unsigned int rs1 = GET_FIELD(insn, 13, 17);
- return gen_load_gpr(dc, rs1);
-}
-
-static TCGv get_src2(DisasContext *dc, unsigned int insn)
-{
- if (IS_IMM) { /* immediate */
- target_long simm = GET_FIELDs(insn, 19, 31);
- TCGv t = tcg_temp_new();
- tcg_gen_movi_tl(t, simm);
- return t;
- } else { /* register */
- unsigned int rs2 = GET_FIELD(insn, 27, 31);
- return gen_load_gpr(dc, rs2);
- }
-}
-
-#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
+#ifdef TARGET_SPARC64
TCGv_i32 c32, zero, dst, s1, s2;
/* We have two choices here: extend the 32 bit data and use movcond_i64,
@@ -2795,19 +2493,27 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
gen_store_fpr_F(dc, rd, dst);
+#else
+ qemu_build_not_reached();
+#endif
}
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
+#ifdef TARGET_SPARC64
TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
gen_load_fpr_D(dc, rs),
gen_load_fpr_D(dc, rd));
gen_store_fpr_D(dc, rd, dst);
+#else
+ qemu_build_not_reached();
+#endif
}
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
+#ifdef TARGET_SPARC64
int qd = QFPREG(rd);
int qs = QFPREG(rs);
@@ -2817,10 +2523,13 @@ static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
gen_update_fprs_dirty(dc, qd);
+#else
+ qemu_build_not_reached();
+#endif
}
-#ifndef CONFIG_USER_ONLY
-static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env)
+#ifdef TARGET_SPARC64
+static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
TCGv_i32 r_tl = tcg_temp_new_i32();
@@ -2843,13 +2552,1359 @@ static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env)
}
#endif
-static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
+static int extract_dfpreg(DisasContext *dc, int x)
+{
+ return DFPREG(x);
+}
+
+static int extract_qfpreg(DisasContext *dc, int x)
+{
+ return QFPREG(x);
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+#define TRANS(NAME, AVAIL, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
+ { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
+
+#define avail_ALL(C) true
+#ifdef TARGET_SPARC64
+# define avail_32(C) false
+# define avail_ASR17(C) false
+# define avail_CASA(C) true
+# define avail_DIV(C) true
+# define avail_MUL(C) true
+# define avail_POWERDOWN(C) false
+# define avail_64(C) true
+# define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
+# define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
+# define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
+# define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
+#else
+# define avail_32(C) true
+# define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
+# define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
+# define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
+# define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
+# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
+# define avail_64(C) false
+# define avail_GL(C) false
+# define avail_HYPV(C) false
+# define avail_VIS1(C) false
+# define avail_VIS2(C) false
+#endif
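
For readers new to the decodetree glue: the TRANS macro above stamps out one trans_* entry point per pattern, gating each on an avail_* predicate. A compilable toy reduction of the idiom (types and names here are stand-ins, not the generated ones):

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool is64; } DisasContext;
typedef struct { int rd; } arg_RDY;

#define avail_ALL(C) true
#define avail_64(C)  ((C)->is64)

#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

static bool do_rd_special(DisasContext *dc, bool priv, int rd)
{
    printf("read special register -> r%d\n", rd);
    return true;
}

TRANS(RDY, ALL, do_rd_special, true, a->rd)

int main(void)
{
    DisasContext dc = { false };
    arg_RDY a = { 5 };
    return trans_RDY(&dc, &a) ? 0 : 1;
}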
+
+/* Default case for non-jump instructions. */
+static bool advance_pc(DisasContext *dc)
+{
+ if (dc->npc & 3) {
+ switch (dc->npc) {
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ dc->pc = dc->npc;
+ gen_op_next_insn();
+ break;
+ case JUMP_PC:
+ /* we can do a static jump */
+ gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+ dc->base.is_jmp = DISAS_NORETURN;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->npc + 4;
+ }
+ return true;
+}
+
+/*
+ * Major opcodes 00 and 01 -- branches, call, and sethi
+ */
+
+static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
+{
+ if (annul) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ return true;
+}
+
+static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
+ target_ulong dest)
+{
+ if (annul) {
+ dc->pc = dest;
+ dc->npc = dest + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dest;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ return true;
+}
+
+static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
+ bool annul, target_ulong dest)
+{
+ target_ulong npc = dc->npc;
+
+ if (annul) {
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
+ gen_goto_tb(dc, 0, npc, dest);
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, npc + 4, npc + 8);
+
+ dc->base.is_jmp = DISAS_NORETURN;
+ } else {
+ if (npc & 3) {
+ switch (npc) {
+ case DYNAMIC_PC:
+ case DYNAMIC_PC_LOOKUP:
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+ tcg_gen_movcond_tl(cmp->cond, cpu_npc,
+ cmp->c1, cmp->c2,
+ tcg_constant_tl(dest), cpu_npc);
+ dc->pc = npc;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ dc->pc = npc;
+ dc->jump_pc[0] = dest;
+ dc->jump_pc[1] = npc + 4;
+ dc->npc = JUMP_PC;
+ if (cmp->is_bool) {
+ tcg_gen_mov_tl(cpu_cond, cmp->c1);
+ } else {
+ tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
+ }
+ }
+ }
+ return true;
+}
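
To make the three advance_jump_* helpers above concrete, here is a plain-C model of SPARC delayed branches with the annul bit for the conditional case (a flat pc/npc pair, no TCG; note that "ba,a" instead annuls the slot even when taken, as advance_jump_uncond_always shows):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pc, npc; } Regs;

/* One conditional branch step: taken branches execute the delay slot;
 * untaken annulled branches skip it. */
static void branch_cond(Regs *r, bool taken, bool annul, uint64_t dest)
{
    if (taken) {
        r->pc = r->npc;
        r->npc = dest;
    } else if (annul) {
        r->pc = r->npc + 4;
        r->npc = r->pc + 4;
    } else {
        r->pc = r->npc;
        r->npc = r->pc + 4;
    }
}

int main(void)
{
    Regs r = { 0x1000, 0x1004 };
    branch_cond(&r, false, true, 0x2000);   /* untaken, annulled */
    printf("pc=0x%llx npc=0x%llx\n",        /* 0x1008 / 0x100c */
           (unsigned long long)r.pc, (unsigned long long)r.npc);
    return 0;
}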
+
+static bool raise_priv(DisasContext *dc)
+{
+ gen_exception(dc, TT_PRIV_INSN);
+ return true;
+}
+
+static bool raise_unimpfpop(DisasContext *dc)
+{
+ gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
+ return true;
+}
+
+static bool gen_trap_float128(DisasContext *dc)
+{
+ if (dc->def->features & CPU_FEATURE_FLOAT128) {
+ return false;
+ }
+ return raise_unimpfpop(dc);
+}
+
+static bool do_bpcc(DisasContext *dc, arg_bcc *a)
+{
+ target_long target = address_mask_i(dc, dc->pc + a->i * 4);
+ DisasCompare cmp;
+
+ switch (a->cond) {
+ case 0x0:
+ return advance_jump_uncond_never(dc, a->a);
+ case 0x8:
+ return advance_jump_uncond_always(dc, a->a, target);
+ default:
+ flush_cond(dc);
+
+ gen_compare(&cmp, a->cc, a->cond, dc);
+ return advance_jump_cond(dc, &cmp, a->a, target);
+ }
+}
+
+TRANS(Bicc, ALL, do_bpcc, a)
+TRANS(BPcc, 64, do_bpcc, a)
+
+static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
+{
+ target_long target = address_mask_i(dc, dc->pc + a->i * 4);
+ DisasCompare cmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ switch (a->cond) {
+ case 0x0:
+ return advance_jump_uncond_never(dc, a->a);
+ case 0x8:
+ return advance_jump_uncond_always(dc, a->a, target);
+ default:
+ flush_cond(dc);
+
+ gen_fcompare(&cmp, a->cc, a->cond);
+ return advance_jump_cond(dc, &cmp, a->a, target);
+ }
+}
+
+TRANS(FBPfcc, 64, do_fbpfcc, a)
+TRANS(FBfcc, ALL, do_fbpfcc, a)
+
+static bool trans_BPr(DisasContext *dc, arg_BPr *a)
+{
+ target_long target = address_mask_i(dc, dc->pc + a->i * 4);
+ DisasCompare cmp;
+
+ if (!avail_64(dc)) {
+ return false;
+ }
+ if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
+ return false;
+ }
+
+ flush_cond(dc);
+ gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
+ return advance_jump_cond(dc, &cmp, a->a, target);
+}
+
+static bool trans_CALL(DisasContext *dc, arg_CALL *a)
+{
+ target_long target = address_mask_i(dc, dc->pc + a->i * 4);
+
+ gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
+ gen_mov_pc_npc(dc);
+ dc->npc = target;
+ return true;
+}
+
+static bool trans_NCP(DisasContext *dc, arg_NCP *a)
+{
+ /*
+ * For sparc32, always generate the no-coprocessor exception.
+ * For sparc64, always generate an illegal instruction exception.
+ */
+#ifdef TARGET_SPARC64
+ return false;
+#else
+ gen_exception(dc, TT_NCP_INSN);
+ return true;
+#endif
+}
+
+static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
+{
+ /* Special-case %g0 because that's the canonical nop. */
+ if (a->rd) {
+ gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
+ }
+ return advance_pc(dc);
+}
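
A one-line worked example of trans_SETHI's constant construction, (uint32_t)imm22 << 10:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t value = 0xdeadbeef;
    uint32_t imm22 = value >> 10;                /* what %hi() extracts */
    printf("0x%08x\n", (uint32_t)(imm22 << 10)); /* 0xdeadbc00 */
    return 0;
}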
+
+/*
+ * Major Opcode 10 -- integer, floating-point, vis, and system insns.
+ */
+
+static bool do_tcc(DisasContext *dc, int cond, int cc,
+ int rs1, bool imm, int rs2_or_imm)
+{
+ int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
+ ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
+ DisasCompare cmp;
+ TCGLabel *lab;
+ TCGv_i32 trap;
+
+ /* Trap never. */
+ if (cond == 0) {
+ return advance_pc(dc);
+ }
+
+ /*
+ * Immediate traps are the most common case. Since this value is
+ * live across the branch, it really pays to evaluate the constant.
+ */
+ if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
+ trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
+ } else {
+ trap = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
+ if (imm) {
+ tcg_gen_addi_i32(trap, trap, rs2_or_imm);
+ } else {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
+ tcg_gen_add_i32(trap, trap, t2);
+ }
+ tcg_gen_andi_i32(trap, trap, mask);
+ tcg_gen_addi_i32(trap, trap, TT_TRAP);
+ }
+
+ /* Trap always. */
+ if (cond == 8) {
+ save_state(dc);
+ gen_helper_raise_exception(tcg_env, trap);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
+
+ /* Conditional trap. */
+ flush_cond(dc);
+ lab = delay_exceptionv(dc, trap);
+ gen_compare(&cmp, cc, cond, dc);
+ tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);
+
+ return advance_pc(dc);
+}
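
The trap vector computed by do_tcc folds to ((rs1 + rs2_or_imm) & mask) + TT_TRAP. A worked sketch, treating the mask and base values as illustrative (the code above picks V8_TRAP_MASK or UA2005_HTRAP_MASK, and TT_TRAP comes from cpu.h):

#include <stdint.h>
#include <stdio.h>

static uint32_t trap_number(uint32_t rs1, uint32_t rs2_or_imm,
                            uint32_t mask, uint32_t tt_trap)
{
    return ((rs1 + rs2_or_imm) & mask) + tt_trap;
}

int main(void)
{
    /* "ta 0x6d" with %g0 sources, assuming a 0x7f mask and 0x80 base */
    printf("0x%x\n", trap_number(0, 0x6d, 0x7f, 0x80));  /* 0xed */
    return 0;
}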
+
+static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
+{
+ if (avail_32(dc) && a->cc) {
+ return false;
+ }
+ return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
+}
+
+static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
+{
+ if (avail_64(dc)) {
+ return false;
+ }
+ return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
+}
+
+static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
+{
+ if (avail_32(dc)) {
+ return false;
+ }
+ return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
+}
+
+static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
+{
+ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
+ return advance_pc(dc);
+}
+
+static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
+{
+ if (avail_32(dc)) {
+ return false;
+ }
+ if (a->mmask) {
+ /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
+ tcg_gen_mb(a->mmask | TCG_BAR_SC);
+ }
+ if (a->cmask) {
+ /* For #Sync, etc, end the TB to recognize interrupts. */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ return advance_pc(dc);
+}
+
+static bool do_rd_special(DisasContext *dc, bool priv, int rd,
+ TCGv (*func)(DisasContext *, TCGv))
+{
+ if (!priv) {
+ return raise_priv(dc);
+ }
+ gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
+ return advance_pc(dc);
+}
+
+static TCGv do_rdy(DisasContext *dc, TCGv dst)
+{
+ return cpu_y;
+}
+
+static bool trans_RDY(DisasContext *dc, arg_RDY *a)
+{
+ /*
+ * TODO: Need a feature bit for sparcv8. In the meantime, treat all
+ * 32-bit cpus like sparcv7, which ignores the rs1 field.
+ * This pattern matches after all other ASRs, so the Leon3 %asr17 case
+ * is handled first.
+ */
+ if (avail_64(dc) && a->rs1 != 0) {
+ return false;
+ }
+ return do_rd_special(dc, true, a->rd, do_rdy);
+}
+
+static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
+{
+ uint32_t val;
+
+ /*
+ * TODO: There are many more fields to be filled,
+ * some of which are writable.
+ */
+ val = dc->def->nwindows - 1; /* [4:0] NWIN */
+ val |= 1 << 8; /* [8] V8 */
+
+ return tcg_constant_tl(val);
+}
+
+TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
+
+static TCGv do_rdccr(DisasContext *dc, TCGv dst)
+{
+ update_psr(dc);
+ gen_helper_rdccr(dst, tcg_env);
+ return dst;
+}
+
+TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
+
+static TCGv do_rdasi(DisasContext *dc, TCGv dst)
+{
+#ifdef TARGET_SPARC64
+ return tcg_constant_tl(dc->asi);
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
+
+static TCGv do_rdtick(DisasContext *dc, TCGv dst)
+{
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
+ tcg_constant_i32(dc->mem_idx));
+ return dst;
+}
+
+/* TODO: non-priv access only allowed when enabled. */
+TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
+
+static TCGv do_rdpc(DisasContext *dc, TCGv dst)
+{
+ return tcg_constant_tl(address_mask_i(dc, dc->pc));
+}
+
+TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
+
+static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ext_i32_tl(dst, cpu_fprs);
+ return dst;
+}
+
+TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
+
+static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
+{
+ gen_trap_ifnofpu(dc);
+ return cpu_gsr;
+}
+
+TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
+
+static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
+ return dst;
+}
+
+TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
+
+static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
+ return dst;
+}
+
+/* TODO: non-priv access only allowed when enabled. */
+TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
+
+static TCGv do_rdstick(DisasContext *dc, TCGv dst)
+{
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
+ tcg_constant_i32(dc->mem_idx));
+ return dst;
+}
+
+/* TODO: non-priv access only allowed when enabled. */
+TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
+
+static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
+ return dst;
+}
+
+/* TODO: supervisor access only allowed when enabled by hypervisor. */
+TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
+
+/*
+ * UltraSPARC-T1 Strand status.
+ * The HYPV check may not be sufficient: UA2005 and UA2007 describe
+ * this ASR as implementation dependent.
+ */
+static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
+{
+ return tcg_constant_tl(1);
+}
+
+TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
+
+static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
+{
+ update_psr(dc);
+ gen_helper_rdpsr(dst, tcg_env);
+ return dst;
+}
+
+TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
+
+static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
+ return dst;
+}
+
+TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
+
+static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
+{
+ TCGv_i32 tl = tcg_temp_new_i32();
+ TCGv_ptr tp = tcg_temp_new_ptr();
+
+ tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
+ tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
+ tcg_gen_shli_i32(tl, tl, 3);
+ tcg_gen_ext_i32_ptr(tp, tl);
+ tcg_gen_add_ptr(tp, tp, tcg_env);
+
+ tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
+ return dst;
+}
+
+TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
+
+static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
+ return dst;
+}
+
+TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
+
+static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
+ return dst;
+}
+
+TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
+
+static TCGv do_rdhver(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
+ return dst;
+}
+
+TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
+
+static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
+ return dst;
+}
+
+TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
+ do_rdhstick_cmpr)
+
+static TCGv do_rdwim(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
+ return dst;
+}
+
+TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
+
+static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
+ return dst;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
+
+static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
+ return dst;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
+
+static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
+ return dst;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
+
+static TCGv do_rdtt(DisasContext *dc, TCGv dst)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
+ return dst;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
+TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
+
+static TCGv do_rdtba(DisasContext *dc, TCGv dst)
+{
+ return cpu_tbr;
+}
+
+TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
+TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
+
+static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
+ return dst;
+}
+
+TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
+
+static TCGv do_rdtl(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
+ return dst;
+}
+
+TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
+
+static TCGv do_rdpil(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
+ return dst;
+}
+
+TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
+
+static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
+{
+ gen_helper_rdcwp(dst, tcg_env);
+ return dst;
+}
+
+TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
+
+static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
+ return dst;
+}
+
+TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
+
+static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
+ return dst;
+}
+
+TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
+ do_rdcanrestore)
+
+static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
+ return dst;
+}
+
+TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
+
+static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
+ return dst;
+}
+
+TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
+
+static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
+ return dst;
+}
+
+TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
+
+static TCGv do_rdgl(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
+ return dst;
+}
+
+TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
+
+/* UA2005 strand status */
+static TCGv do_rdssr(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
+ return dst;
+}
+
+TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
+
+static TCGv do_rdver(DisasContext *dc, TCGv dst)
+{
+ tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
+ return dst;
+}
+
+TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
+
+static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
+{
+ if (avail_64(dc)) {
+ gen_helper_flushw(tcg_env);
+ return advance_pc(dc);
+ }
+ return false;
+}
+
+static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
+ void (*func)(DisasContext *, TCGv))
+{
+ TCGv src;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
+ return false;
+ }
+ if (!priv) {
+ return raise_priv(dc);
+ }
+
+ if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
+ src = tcg_constant_tl(a->rs2_or_imm);
+ } else {
+ TCGv src1 = gen_load_gpr(dc, a->rs1);
+ if (a->rs2_or_imm == 0) {
+ src = src1;
+ } else {
+ src = tcg_temp_new();
+ if (a->imm) {
+ tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
+ } else {
+ tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
+ }
+ }
+ }
+ func(dc, src);
+ return advance_pc(dc);
+}
+
+static void do_wry(DisasContext *dc, TCGv src)
+{
+ tcg_gen_ext32u_tl(cpu_y, src);
+}
+
+TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
+
+static void do_wrccr(DisasContext *dc, TCGv src)
+{
+ gen_helper_wrccr(tcg_env, src);
+}
+
+TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
+
+static void do_wrasi(DisasContext *dc, TCGv src)
+{
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_ext8u_tl(tmp, src);
+ tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
+ /* End TB to notice changed ASI. */
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
+
+static void do_wrfprs(DisasContext *dc, TCGv src)
+{
+#ifdef TARGET_SPARC64
+ tcg_gen_trunc_tl_i32(cpu_fprs, src);
+ dc->fprs_dirty = 0;
+ dc->base.is_jmp = DISAS_EXIT;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
+
+static void do_wrgsr(DisasContext *dc, TCGv src)
+{
+ gen_trap_ifnofpu(dc);
+ tcg_gen_mov_tl(cpu_gsr, src);
+}
+
+TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
+
+static void do_wrsoftint_set(DisasContext *dc, TCGv src)
+{
+ gen_helper_set_softint(tcg_env, src);
+}
+
+TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
+
+static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
+{
+ gen_helper_clear_softint(tcg_env, src);
+}
+
+TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
+
+static void do_wrsoftint(DisasContext *dc, TCGv src)
+{
+ gen_helper_write_softint(tcg_env, src);
+}
+
+TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
+
+static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
+{
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
+ translator_io_start(&dc->base);
+ gen_helper_tick_set_limit(r_tickptr, src);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
+
+static void do_wrstick(DisasContext *dc, TCGv src)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
+ translator_io_start(&dc->base);
+ gen_helper_tick_set_count(r_tickptr, src);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
+
+static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
+{
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
+ translator_io_start(&dc->base);
+ gen_helper_tick_set_limit(r_tickptr, src);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
+
+static void do_wrpowerdown(DisasContext *dc, TCGv src)
+{
+ save_state(dc);
+ gen_helper_power_down(tcg_env);
+}
+
+TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
+
+static void do_wrpsr(DisasContext *dc, TCGv src)
+{
+ gen_helper_wrpsr(tcg_env, src);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
+
+static void do_wrwim(DisasContext *dc, TCGv src)
+{
+ target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_andi_tl(tmp, src, mask);
+ tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
+}
+
+TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
+
+static void do_wrtpc(DisasContext *dc, TCGv src)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
+
+static void do_wrtnpc(DisasContext *dc, TCGv src)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
+
+static void do_wrtstate(DisasContext *dc, TCGv src)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
+
+static void do_wrtt(DisasContext *dc, TCGv src)
+{
+#ifdef TARGET_SPARC64
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr);
+ tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
+
+static void do_wrtick(DisasContext *dc, TCGv src)
+{
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
+ translator_io_start(&dc->base);
+ gen_helper_tick_set_count(r_tickptr, src);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
+
+static void do_wrtba(DisasContext *dc, TCGv src)
+{
+ tcg_gen_mov_tl(cpu_tbr, src);
+}
+
+TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
+
+static void do_wrpstate(DisasContext *dc, TCGv src)
+{
+ save_state(dc);
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ gen_helper_wrpstate(tcg_env, src);
+ dc->npc = DYNAMIC_PC;
+}
+
+TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
+
+static void do_wrtl(DisasContext *dc, TCGv src)
+{
+ save_state(dc);
+ tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
+ dc->npc = DYNAMIC_PC;
+}
+
+TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
+
+static void do_wrpil(DisasContext *dc, TCGv src)
+{
+ if (translator_io_start(&dc->base)) {
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ gen_helper_wrpil(tcg_env, src);
+}
+
+TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
+
+static void do_wrcwp(DisasContext *dc, TCGv src)
+{
+ gen_helper_wrcwp(tcg_env, src);
+}
+
+TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
+
+static void do_wrcansave(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
+}
+
+TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
+
+static void do_wrcanrestore(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
+}
+
+TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
+
+static void do_wrcleanwin(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
+}
+
+TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
+
+static void do_wrotherwin(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
+}
+
+TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
+
+static void do_wrwstate(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
+}
+
+TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
+
+static void do_wrgl(DisasContext *dc, TCGv src)
+{
+ gen_helper_wrgl(tcg_env, src);
+}
+
+TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
+
+/* UA2005 strand status */
+static void do_wrssr(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
+}
+
+TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
+
+TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
+
+static void do_wrhpstate(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
+
+static void do_wrhtstate(DisasContext *dc, TCGv src)
+{
+ TCGv_i32 tl = tcg_temp_new_i32();
+ TCGv_ptr tp = tcg_temp_new_ptr();
+
+ tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
+ tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
+ tcg_gen_shli_i32(tl, tl, 3);
+ tcg_gen_ext_i32_ptr(tp, tl);
+ tcg_gen_add_ptr(tp, tp, tcg_env);
+
+ tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
+}
+
+TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
+
+static void do_wrhintp(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
+}
+
+TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
+
+static void do_wrhtba(DisasContext *dc, TCGv src)
+{
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
+}
+
+TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
+
+static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
+{
+ TCGv_ptr r_tickptr = tcg_temp_new_ptr();
+
+ tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
+ tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
+ translator_io_start(&dc->base);
+ gen_helper_tick_set_limit(r_tickptr, src);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+}
+
+TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
+ do_wrhstick_cmpr)
+
+static bool do_saved_restored(DisasContext *dc, bool saved)
+{
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+ if (saved) {
+ gen_helper_saved(tcg_env);
+ } else {
+ gen_helper_restored(tcg_env);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(SAVED, 64, do_saved_restored, true)
+TRANS(RESTORED, 64, do_saved_restored, false)
+
+static bool trans_NOP(DisasContext *dc, arg_NOP *a)
+{
+ return advance_pc(dc);
+}
+
+/*
+ * TODO: Need a feature bit for sparcv8.
+ * In the meantime, treat all 32-bit cpus like sparcv7.
+ */
+TRANS(NOP_v7, 32, trans_NOP, a)
+TRANS(NOP_v9, 64, trans_NOP, a)
+
+static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*funci)(TCGv, TCGv, target_long))
+{
+ TCGv dst, src1;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
+ }
+
+ if (a->cc) {
+ dst = cpu_cc_dst;
+ } else {
+ dst = gen_dest_gpr(dc, a->rd);
+ }
+ src1 = gen_load_gpr(dc, a->rs1);
+
+ if (a->imm || a->rs2_or_imm == 0) {
+ if (funci) {
+ funci(dst, src1, a->rs2_or_imm);
+ } else {
+ func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
+ }
+ } else {
+ func(dst, src1, cpu_regs[a->rs2_or_imm]);
+ }
+ gen_store_gpr(dc, a->rd, dst);
+
+ if (a->cc) {
+ tcg_gen_movi_i32(cpu_cc_op, cc_op);
+ dc->cc_op = cc_op;
+ }
+ return advance_pc(dc);
+}
+
+static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*funci)(TCGv, TCGv, target_long),
+ void (*func_cc)(TCGv, TCGv, TCGv))
+{
+ if (a->cc) {
+ assert(cc_op >= 0);
+ return do_arith_int(dc, a, cc_op, func_cc, NULL);
+ }
+ return do_arith_int(dc, a, cc_op, func, funci);
+}
+
+static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*funci)(TCGv, TCGv, target_long))
+{
+ return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
+}
+
+TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
+ tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
+TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
+ tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)
+
+TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
+TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
+TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
+TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)
+
+TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
+TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
+TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
+TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
+TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
+
+TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
+TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
+TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
+
+TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
+TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
+TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
+TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)
+
+/* TODO: Should have a feature bit -- comes in with UltraSPARC T2. */
+TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
+
+static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
+{
+ /* OR with %g0 is the canonical alias for MOV. */
+ if (!a->cc && a->rs1 == 0) {
+ if (a->imm || a->rs2_or_imm == 0) {
+ gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
+ } else if (a->rs2_or_imm & ~0x1f) {
+ /* For simplicity, we under-decoded the rs2 form. */
+ return false;
+ } else {
+ gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
+ }
+ return advance_pc(dc);
+ }
+ return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
+}
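
The special case above implements the classic assembler aliases: with
%g0 as rs1, OR degenerates to a plain move, e.g. "mov imm13, rd" is
encoded as "or %g0, imm13, rd" and "clr rd" as "or %g0, %g0, rd".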
+
+static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
+{
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain ADD. */
+ return do_arith(dc, a, CC_OP_ADD,
+ tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ return do_arith(dc, a, CC_OP_ADDX,
+ gen_op_addc_add, NULL, gen_op_addccc_add);
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ return do_arith(dc, a, CC_OP_ADDX,
+ gen_op_addc_sub, NULL, gen_op_addccc_sub);
+ default:
+ return do_arith(dc, a, CC_OP_ADDX,
+ gen_op_addc_generic, NULL, gen_op_addccc_generic);
+ }
+}
+
+static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
+{
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain SUB. */
+ return do_arith(dc, a, CC_OP_SUB,
+ tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ return do_arith(dc, a, CC_OP_SUBX,
+ gen_op_subc_add, NULL, gen_op_subccc_add);
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ return do_arith(dc, a, CC_OP_SUBX,
+ gen_op_subc_sub, NULL, gen_op_subccc_sub);
+ default:
+ return do_arith(dc, a, CC_OP_SUBX,
+ gen_op_subc_generic, NULL, gen_op_subccc_generic);
+ }
+}
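
The cc_op dispatch in trans_ADDC and trans_SUBC above exists because
the carry bit can often be recovered without the generic flags helper:
after a logic or divide op the carry is known to be zero, and after an
ADD it is just an unsigned comparison of the saved operands. A minimal
host-side sketch of that last case (an illustration, not the QEMU
helpers):

    /* Carry-out of "cc_dst = cc_src + cc_src2": set iff the sum
     * wrapped around, i.e. the result is unsigned-less-than an
     * addend. */
    static unsigned carry_from_add(uint64_t cc_dst, uint64_t cc_src)
    {
        return cc_dst < cc_src;
    }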
+
+static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
+{
+ update_psr(dc);
+ return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
+}
+
+static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
int width, bool cc, bool left)
{
- TCGv lo1, lo2;
+ TCGv dst, s1, s2, lo1, lo2;
uint64_t amask, tabl, tabr;
int shift, imask, omask;
+ dst = gen_dest_gpr(dc, a->rd);
+ s1 = gen_load_gpr(dc, a->rs1);
+ s2 = gen_load_gpr(dc, a->rs2);
+
if (cc) {
tcg_gen_mov_tl(cpu_cc_src, s1);
tcg_gen_mov_tl(cpu_cc_src2, s2);
@@ -2858,13 +3913,15 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
dc->cc_op = CC_OP_SUB;
}
- /* Theory of operation: there are two tables, left and right (not to
- be confused with the left and right versions of the opcode). These
- are indexed by the low 3 bits of the inputs. To make things "easy",
- these tables are loaded into two constants, TABL and TABR below.
- The operation index = (input & imask) << shift calculates the index
- into the constant, while val = (table >> index) & omask calculates
- the value we're looking for. */
+ /*
+ * Theory of operation: there are two tables, left and right (not to
+ * be confused with the left and right versions of the opcode). These
+ * are indexed by the low 3 bits of the inputs. To make things "easy",
+ * these tables are loaded into two constants, TABL and TABR below.
+ * The operation index = (input & imask) << shift calculates the index
+ * into the constant, while val = (table >> index) & omask calculates
+ * the value we're looking for.
+ */
switch (width) {
case 8:
imask = 0x7;
@@ -2918,2653 +3975,1345 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
tcg_gen_andi_tl(lo1, lo1, omask);
tcg_gen_andi_tl(lo2, lo2, omask);
- amask = -8;
- if (AM_CHECK(dc)) {
- amask &= 0xffffffffULL;
- }
+ amask = address_mask_i(dc, -8);
tcg_gen_andi_tl(s1, s1, amask);
tcg_gen_andi_tl(s2, s2, amask);
/* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
tcg_gen_and_tl(lo2, lo2, lo1);
tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
+
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
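
To make the table lookup described in the theory-of-operation comment
above concrete, here is a host-side sketch of the same computation (an
illustration only; the actual TABL/TABR constants live in the width
switch above and are elided here):

    /* index = (addr & imask) << shift selects a field of the 64-bit
     * table constant; val = (table >> index) & omask extracts it.
     * For width 8: imask = 0x7, shift = 3, omask = 0xff, i.e. the low
     * three address bits pick one byte of the table. */
    static unsigned edge_mask(uint64_t table, uint64_t addr,
                              int imask, int shift, unsigned omask)
    {
        unsigned index = (addr & imask) << shift;
        return (table >> index) & omask;
    }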
+
+TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
+TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
+TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
+TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
+TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
+TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
+
+TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
+TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
+TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
+TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
+TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
+TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
+
+static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dst = gen_dest_gpr(dc, a->rd);
+ TCGv src1 = gen_load_gpr(dc, a->rs1);
+ TCGv src2 = gen_load_gpr(dc, a->rs2);
+
+ func(dst, src1, src2);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
}
-static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
+TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
+TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
+TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
+
+static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
+#ifdef TARGET_SPARC64
TCGv tmp = tcg_temp_new();
tcg_gen_add_tl(tmp, s1, s2);
tcg_gen_andi_tl(dst, tmp, -8);
- if (left) {
- tcg_gen_neg_tl(tmp, tmp);
- }
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
+#else
+ g_assert_not_reached();
+#endif
}
-static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
+static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
- TCGv t1, t2, shift;
-
- t1 = tcg_temp_new();
- t2 = tcg_temp_new();
- shift = tcg_temp_new();
+#ifdef TARGET_SPARC64
+ TCGv tmp = tcg_temp_new();
- tcg_gen_andi_tl(shift, gsr, 7);
- tcg_gen_shli_tl(shift, shift, 3);
- tcg_gen_shl_tl(t1, s1, shift);
+ tcg_gen_add_tl(tmp, s1, s2);
+ tcg_gen_andi_tl(dst, tmp, -8);
+ tcg_gen_neg_tl(tmp, tmp);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
+#else
+ g_assert_not_reached();
+#endif
+}
- /* A shift of 64 does not produce 0 in TCG. Divide this into a
- shift of (up to 63) followed by a constant shift of 1. */
- tcg_gen_xori_tl(shift, shift, 63);
- tcg_gen_shr_tl(t2, s2, shift);
- tcg_gen_shri_tl(t2, t2, 1);
+TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
+TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
- tcg_gen_or_tl(dst, t1, t2);
-}
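
The deleted comment above documents a trick worth keeping in mind: a
TCG shift by 64 is not guaranteed to produce 0, so "s2 >> (64 - shift)"
is split into a variable shift of at most 63 followed by a constant
shift of 1. A host-side sketch of the identity (illustration only):

    /* For 0 <= shift <= 63, (shift ^ 63) == 63 - shift, so this
     * computes s2 >> (64 - shift) while yielding 0 when shift == 0
     * instead of hitting an out-of-range 64-bit shift count. */
    static uint64_t shr_64_minus(uint64_t s2, unsigned shift)
    {
        return (s2 >> (shift ^ 63)) >> 1;
    }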
+static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
+{
+#ifdef TARGET_SPARC64
+ tcg_gen_add_tl(dst, s1, s2);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
+#else
+ g_assert_not_reached();
#endif
+}
-#define CHECK_IU_FEATURE(dc, FEATURE) \
- if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
- goto illegal_insn;
-#define CHECK_FPU_FEATURE(dc, FEATURE) \
- if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
- goto nfpu_insn;
+TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
-/* before an instruction, dc->pc must be static */
-static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
+static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
- unsigned int opc, rs1, rs2, rd;
- TCGv cpu_src1, cpu_src2;
- TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
- TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
- target_long simm;
+ TCGv dst, src1, src2;
- opc = GET_FIELD(insn, 0, 1);
- rd = GET_FIELD(insn, 2, 6);
+ /* Reject 64-bit shifts for sparc32. */
+ if (avail_32(dc) && a->x) {
+ return false;
+ }
- switch (opc) {
- case 0: /* branches/sethi */
- {
- unsigned int xop = GET_FIELD(insn, 7, 9);
- int32_t target;
- switch (xop) {
-#ifdef TARGET_SPARC64
- case 0x1: /* V9 BPcc */
- {
- int cc;
-
- target = GET_FIELD_SP(insn, 0, 18);
- target = sign_extend(target, 19);
- target <<= 2;
- cc = GET_FIELD_SP(insn, 20, 21);
- if (cc == 0)
- do_branch(dc, target, insn, 0);
- else if (cc == 2)
- do_branch(dc, target, insn, 1);
- else
- goto illegal_insn;
- goto jmp_insn;
- }
- case 0x3: /* V9 BPr */
- {
- target = GET_FIELD_SP(insn, 0, 13) |
- (GET_FIELD_SP(insn, 20, 21) << 14);
- target = sign_extend(target, 16);
- target <<= 2;
- cpu_src1 = get_src1(dc, insn);
- do_branch_reg(dc, target, insn, cpu_src1);
- goto jmp_insn;
- }
- case 0x5: /* V9 FBPcc */
- {
- int cc = GET_FIELD_SP(insn, 20, 21);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- target = GET_FIELD_SP(insn, 0, 18);
- target = sign_extend(target, 19);
- target <<= 2;
- do_fbranch(dc, target, insn, cc);
- goto jmp_insn;
- }
-#else
- case 0x7: /* CBN+x */
- {
- goto ncp_insn;
- }
-#endif
- case 0x2: /* BN+x */
- {
- target = GET_FIELD(insn, 10, 31);
- target = sign_extend(target, 22);
- target <<= 2;
- do_branch(dc, target, insn, 0);
- goto jmp_insn;
- }
- case 0x6: /* FBN+x */
- {
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- target = GET_FIELD(insn, 10, 31);
- target = sign_extend(target, 22);
- target <<= 2;
- do_fbranch(dc, target, insn, 0);
- goto jmp_insn;
- }
- case 0x4: /* SETHI */
- /* Special-case %g0 because that's the canonical nop. */
- if (rd) {
- uint32_t value = GET_FIELD(insn, 10, 31);
- TCGv t = gen_dest_gpr(dc, rd);
- tcg_gen_movi_tl(t, value << 10);
- gen_store_gpr(dc, rd, t);
- }
- break;
- case 0x0: /* UNIMPL */
- default:
- goto illegal_insn;
- }
- break;
+ src2 = tcg_temp_new();
+ tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
+ src1 = gen_load_gpr(dc, a->rs1);
+ dst = gen_dest_gpr(dc, a->rd);
+
+ if (l) {
+ tcg_gen_shl_tl(dst, src1, src2);
+ if (!a->x) {
+ tcg_gen_ext32u_tl(dst, dst);
}
- break;
- case 1: /*CALL*/
- {
- target_long target = GET_FIELDs(insn, 2, 31) << 2;
- TCGv o7 = gen_dest_gpr(dc, 15);
+ } else if (u) {
+ if (!a->x) {
+ tcg_gen_ext32u_tl(dst, src1);
+ src1 = dst;
+ }
+ tcg_gen_shr_tl(dst, src1, src2);
+ } else {
+ if (!a->x) {
+ tcg_gen_ext32s_tl(dst, src1);
+ src1 = dst;
+ }
+ tcg_gen_sar_tl(dst, src1, src2);
+ }
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
- tcg_gen_movi_tl(o7, dc->pc);
- gen_store_gpr(dc, 15, o7);
- target += dc->pc;
- gen_mov_pc_npc(dc);
-#ifdef TARGET_SPARC64
- if (unlikely(AM_CHECK(dc))) {
- target &= 0xffffffffULL;
- }
-#endif
- dc->npc = target;
+TRANS(SLL_r, ALL, do_shift_r, a, true, true)
+TRANS(SRL_r, ALL, do_shift_r, a, false, true)
+TRANS(SRA_r, ALL, do_shift_r, a, false, false)
+
+static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
+{
+ TCGv dst, src1;
+
+ /* Reject 64-bit shifts for sparc32. */
+ if (avail_32(dc) && (a->x || a->i >= 32)) {
+ return false;
+ }
+
+ src1 = gen_load_gpr(dc, a->rs1);
+ dst = gen_dest_gpr(dc, a->rd);
+
+ if (avail_32(dc) || a->x) {
+ if (l) {
+ tcg_gen_shli_tl(dst, src1, a->i);
+ } else if (u) {
+ tcg_gen_shri_tl(dst, src1, a->i);
+ } else {
+ tcg_gen_sari_tl(dst, src1, a->i);
}
- goto jmp_insn;
- case 2: /* FPU & Logical Operations */
- {
- unsigned int xop = GET_FIELD(insn, 7, 12);
- TCGv cpu_dst = tcg_temp_new();
- TCGv cpu_tmp0;
-
- if (xop == 0x3a) { /* generate trap */
- int cond = GET_FIELD(insn, 3, 6);
- TCGv_i32 trap;
- TCGLabel *l1 = NULL;
- int mask;
-
- if (cond == 0) {
- /* Trap never. */
- break;
- }
+ } else {
+ if (l) {
+ tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
+ } else if (u) {
+ tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
+ } else {
+ tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
+ }
+ }
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
- save_state(dc);
+TRANS(SLL_i, ALL, do_shift_i, a, true, true)
+TRANS(SRL_i, ALL, do_shift_i, a, false, true)
+TRANS(SRA_i, ALL, do_shift_i, a, false, false)
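
For the sparc64-with-32-bit-shift path in do_shift_i above, the shift
and the 32-bit extension are folded into a single deposit/extract op.
A host-side check of the equivalences it relies on (an illustration
under ordinary C semantics, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    static void check_shift32(uint64_t x, unsigned i)   /* 0 < i < 32 */
    {
        /* SLL: (x << i) zero-extended from 32 bits ==
         * deposit_z(x, ofs = i, len = 32 - i). */
        assert(((x << i) & 0xffffffffu) ==
               ((x & ((1ull << (32 - i)) - 1)) << i));

        /* SRL: a 32-bit logical shift == extract(x, i, 32 - i). */
        assert(((uint32_t)x >> i) ==
               ((x >> i) & ((1ull << (32 - i)) - 1)));
    }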
- if (cond != 8) {
- /* Conditional trap. */
- DisasCompare cmp;
-#ifdef TARGET_SPARC64
- /* V9 icc/xcc */
- int cc = GET_FIELD_SP(insn, 11, 12);
- if (cc == 0) {
- gen_compare(&cmp, 0, cond, dc);
- } else if (cc == 2) {
- gen_compare(&cmp, 1, cond, dc);
- } else {
- goto illegal_insn;
- }
-#else
- gen_compare(&cmp, 0, cond, dc);
-#endif
- l1 = gen_new_label();
- tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
- cmp.c1, cmp.c2, l1);
- }
+static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
+{
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!imm && rs2_or_imm & ~0x1f) {
+ return NULL;
+ }
+ if (imm || rs2_or_imm == 0) {
+ return tcg_constant_tl(rs2_or_imm);
+ } else {
+ return cpu_regs[rs2_or_imm];
+ }
+}
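
The helper above centralizes the reg-or-immediate operand rule: the
second operand is either a 13-bit signed immediate or a register, and
register %g0 always reads as zero, so the reg == 0 case folds into the
same constant path as the immediate. A value-level sketch (illustration
only; "regs" is a hypothetical array standing in for the current
window's registers):

    static int64_t rs2_or_imm_value(const int64_t *regs, bool imm,
                                    int rs2_or_imm)
    {
        if (imm || rs2_or_imm == 0) {
            return rs2_or_imm;          /* immediate, or %g0 == 0 */
        }
        return regs[rs2_or_imm & 0x1f]; /* ordinary register source */
    }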
- mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
- ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
-
- /* Don't use the normal temporaries, as they may well have
- gone out of scope with the branch above. While we're
- doing that we might as well pre-truncate to 32-bit. */
- trap = tcg_temp_new_i32();
-
- rs1 = GET_FIELD_SP(insn, 14, 18);
- if (IS_IMM) {
- rs2 = GET_FIELD_SP(insn, 0, 7);
- if (rs1 == 0) {
- tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
- /* Signal that the trap value is fully constant. */
- mask = 0;
- } else {
- TCGv t1 = gen_load_gpr(dc, rs1);
- tcg_gen_trunc_tl_i32(trap, t1);
- tcg_gen_addi_i32(trap, trap, rs2);
- }
- } else {
- TCGv t1, t2;
- rs2 = GET_FIELD_SP(insn, 0, 4);
- t1 = gen_load_gpr(dc, rs1);
- t2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(t1, t1, t2);
- tcg_gen_trunc_tl_i32(trap, t1);
- }
- if (mask != 0) {
- tcg_gen_andi_i32(trap, trap, mask);
- tcg_gen_addi_i32(trap, trap, TT_TRAP);
- }
+static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
+{
+ TCGv dst = gen_load_gpr(dc, rd);
- gen_helper_raise_exception(tcg_env, trap);
+ tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
+ gen_store_gpr(dc, rd, dst);
+ return advance_pc(dc);
+}
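
Note that do_mov_cond loads the old destination value first, so the
movcond leaves rd unchanged when the condition is false. In plain C
terms (illustration only):

    /* rd = cond ? src2 : rd; -- a conditional move, not a plain store. */
    static int64_t mov_cond_sketch(bool cond, int64_t old_rd, int64_t src2)
    {
        return cond ? src2 : old_rd;
    }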
- if (cond == 8) {
- /* An unconditional trap ends the TB. */
- dc->base.is_jmp = DISAS_NORETURN;
- goto jmp_insn;
- } else {
- /* A conditional trap falls through to the next insn. */
- gen_set_label(l1);
- break;
- }
- } else if (xop == 0x28) {
- rs1 = GET_FIELD(insn, 13, 17);
- switch(rs1) {
- case 0: /* rdy */
-#ifndef TARGET_SPARC64
- case 0x01 ... 0x0e: /* undefined in the SPARCv8
- manual, rdy on the microSPARC
- II */
- case 0x0f: /* stbar in the SPARCv8 manual,
- rdy on the microSPARC II */
- case 0x10 ... 0x1f: /* implementation-dependent in the
- SPARCv8 manual, rdy on the
- microSPARC II */
- /* Read Asr17 */
- if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
- TCGv t = gen_dest_gpr(dc, rd);
- /* Read Asr17 for a Leon3 monoprocessor */
- tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
- gen_store_gpr(dc, rd, t);
- break;
- }
-#endif
- gen_store_gpr(dc, rd, cpu_y);
- break;
-#ifdef TARGET_SPARC64
- case 0x2: /* V9 rdccr */
- update_psr(dc);
- gen_helper_rdccr(cpu_dst, tcg_env);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x3: /* V9 rdasi */
- tcg_gen_movi_tl(cpu_dst, dc->asi);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x4: /* V9 rdtick */
- {
- TCGv_ptr r_tickptr;
- TCGv_i32 r_const;
-
- r_tickptr = tcg_temp_new_ptr();
- r_const = tcg_constant_i32(dc->mem_idx);
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, tick));
- if (translator_io_start(&dc->base)) {
- dc->base.is_jmp = DISAS_EXIT;
- }
- gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr,
- r_const);
- gen_store_gpr(dc, rd, cpu_dst);
- }
- break;
- case 0x5: /* V9 rdpc */
- {
- TCGv t = gen_dest_gpr(dc, rd);
- if (unlikely(AM_CHECK(dc))) {
- tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
- } else {
- tcg_gen_movi_tl(t, dc->pc);
- }
- gen_store_gpr(dc, rd, t);
- }
- break;
- case 0x6: /* V9 rdfprs */
- tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0xf: /* V9 membar */
- break; /* no effect */
- case 0x13: /* Graphics Status */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_store_gpr(dc, rd, cpu_gsr);
- break;
- case 0x16: /* Softint */
- tcg_gen_ld32s_tl(cpu_dst, tcg_env,
- offsetof(CPUSPARCState, softint));
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x17: /* Tick compare */
- gen_store_gpr(dc, rd, cpu_tick_cmpr);
- break;
- case 0x18: /* System tick */
- {
- TCGv_ptr r_tickptr;
- TCGv_i32 r_const;
-
- r_tickptr = tcg_temp_new_ptr();
- r_const = tcg_constant_i32(dc->mem_idx);
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, stick));
- if (translator_io_start(&dc->base)) {
- dc->base.is_jmp = DISAS_EXIT;
- }
- gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr,
- r_const);
- gen_store_gpr(dc, rd, cpu_dst);
- }
- break;
- case 0x19: /* System tick compare */
- gen_store_gpr(dc, rd, cpu_stick_cmpr);
- break;
- case 0x1a: /* UltraSPARC-T1 Strand status */
- /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
- * this ASR as impl. dep
- */
- CHECK_IU_FEATURE(dc, HYPV);
- {
- TCGv t = gen_dest_gpr(dc, rd);
- tcg_gen_movi_tl(t, 1UL);
- gen_store_gpr(dc, rd, t);
- }
- break;
- case 0x10: /* Performance Control */
- case 0x11: /* Performance Instrumentation Counter */
- case 0x12: /* Dispatch Control */
- case 0x14: /* Softint set, WO */
- case 0x15: /* Softint clear, WO */
-#endif
- default:
- goto illegal_insn;
- }
-#if !defined(CONFIG_USER_ONLY)
- } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
-#ifndef TARGET_SPARC64
- if (!supervisor(dc)) {
- goto priv_insn;
- }
- update_psr(dc);
- gen_helper_rdpsr(cpu_dst, tcg_env);
-#else
- CHECK_IU_FEATURE(dc, HYPV);
- if (!hypervisor(dc))
- goto priv_insn;
- rs1 = GET_FIELD(insn, 13, 17);
- switch (rs1) {
- case 0: // hpstate
- tcg_gen_ld_i64(cpu_dst, tcg_env,
- offsetof(CPUSPARCState, hpstate));
- break;
- case 1: // htstate
- // gen_op_rdhtstate();
- break;
- case 3: // hintp
- tcg_gen_mov_tl(cpu_dst, cpu_hintp);
- break;
- case 5: // htba
- tcg_gen_mov_tl(cpu_dst, cpu_htba);
- break;
- case 6: // hver
- tcg_gen_mov_tl(cpu_dst, cpu_hver);
- break;
- case 31: // hstick_cmpr
- tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
- break;
- default:
- goto illegal_insn;
- }
-#endif
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
- if (!supervisor(dc)) {
- goto priv_insn;
- }
- cpu_tmp0 = tcg_temp_new();
-#ifdef TARGET_SPARC64
- rs1 = GET_FIELD(insn, 13, 17);
- switch (rs1) {
- case 0: // tpc
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tpc));
- }
- break;
- case 1: // tnpc
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tnpc));
- }
- break;
- case 2: // tstate
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tstate));
- }
- break;
- case 3: // tt
- {
- TCGv_ptr r_tsptr = tcg_temp_new_ptr();
-
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tt));
- }
- break;
- case 4: // tick
- {
- TCGv_ptr r_tickptr;
- TCGv_i32 r_const;
-
- r_tickptr = tcg_temp_new_ptr();
- r_const = tcg_constant_i32(dc->mem_idx);
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, tick));
- if (translator_io_start(&dc->base)) {
- dc->base.is_jmp = DISAS_EXIT;
- }
- gen_helper_tick_get_count(cpu_tmp0, tcg_env,
- r_tickptr, r_const);
- }
- break;
- case 5: // tba
- tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
- break;
- case 6: // pstate
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, pstate));
- break;
- case 7: // tl
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, tl));
- break;
- case 8: // pil
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, psrpil));
- break;
- case 9: // cwp
- gen_helper_rdcwp(cpu_tmp0, tcg_env);
- break;
- case 10: // cansave
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, cansave));
- break;
- case 11: // canrestore
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, canrestore));
- break;
- case 12: // cleanwin
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, cleanwin));
- break;
- case 13: // otherwin
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, otherwin));
- break;
- case 14: // wstate
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, wstate));
- break;
- case 16: // UA2005 gl
- CHECK_IU_FEATURE(dc, GL);
- tcg_gen_ld32s_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, gl));
- break;
- case 26: // UA2005 strand status
- CHECK_IU_FEATURE(dc, HYPV);
- if (!hypervisor(dc))
- goto priv_insn;
- tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
- break;
- case 31: // ver
- tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
- break;
- case 15: // fq
- default:
- goto illegal_insn;
- }
-#else
- tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
-#endif
- gen_store_gpr(dc, rd, cpu_tmp0);
- break;
-#endif
-#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
- } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
-#ifdef TARGET_SPARC64
- gen_helper_flushw(tcg_env);
-#else
- if (!supervisor(dc))
- goto priv_insn;
- gen_store_gpr(dc, rd, cpu_tbr);
-#endif
- break;
-#endif
- } else if (xop == 0x34) { /* FPU Operations */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_op_clear_ieee_excp_and_FTT();
- rs1 = GET_FIELD(insn, 13, 17);
- rs2 = GET_FIELD(insn, 27, 31);
- xop = GET_FIELD(insn, 18, 26);
-
- switch (xop) {
- case 0x1: /* fmovs */
- cpu_src1_32 = gen_load_fpr_F(dc, rs2);
- gen_store_fpr_F(dc, rd, cpu_src1_32);
- break;
- case 0x5: /* fnegs */
- gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
- break;
- case 0x9: /* fabss */
- gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
- break;
- case 0x29: /* fsqrts */
- CHECK_FPU_FEATURE(dc, FSQRT);
- gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
- break;
- case 0x2a: /* fsqrtd */
- CHECK_FPU_FEATURE(dc, FSQRT);
- gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
- break;
- case 0x2b: /* fsqrtq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
- break;
- case 0x41: /* fadds */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
- break;
- case 0x42: /* faddd */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
- break;
- case 0x43: /* faddq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
- break;
- case 0x45: /* fsubs */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
- break;
- case 0x46: /* fsubd */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
- break;
- case 0x47: /* fsubq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
- break;
- case 0x49: /* fmuls */
- CHECK_FPU_FEATURE(dc, FMUL);
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
- break;
- case 0x4a: /* fmuld */
- CHECK_FPU_FEATURE(dc, FMUL);
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
- break;
- case 0x4b: /* fmulq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- CHECK_FPU_FEATURE(dc, FMUL);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
- break;
- case 0x4d: /* fdivs */
- gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
- break;
- case 0x4e: /* fdivd */
- gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
- break;
- case 0x4f: /* fdivq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
- break;
- case 0x69: /* fsmuld */
- CHECK_FPU_FEATURE(dc, FSMULD);
- gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
- break;
- case 0x6e: /* fdmulq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
- break;
- case 0xc4: /* fitos */
- gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
- break;
- case 0xc6: /* fdtos */
- gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
- break;
- case 0xc7: /* fqtos */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
- break;
- case 0xc8: /* fitod */
- gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
- break;
- case 0xc9: /* fstod */
- gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
- break;
- case 0xcb: /* fqtod */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
- break;
- case 0xcc: /* fitoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
- break;
- case 0xcd: /* fstoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
- break;
- case 0xce: /* fdtoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
- break;
- case 0xd1: /* fstoi */
- gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
- break;
- case 0xd2: /* fdtoi */
- gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
- break;
- case 0xd3: /* fqtoi */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
- break;
-#ifdef TARGET_SPARC64
- case 0x2: /* V9 fmovd */
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- gen_store_fpr_D(dc, rd, cpu_src1_64);
- break;
- case 0x3: /* V9 fmovq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_move_Q(dc, rd, rs2);
- break;
- case 0x6: /* V9 fnegd */
- gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
- break;
- case 0x7: /* V9 fnegq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
- break;
- case 0xa: /* V9 fabsd */
- gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
- break;
- case 0xb: /* V9 fabsq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
- break;
- case 0x81: /* V9 fstox */
- gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
- break;
- case 0x82: /* V9 fdtox */
- gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
- break;
- case 0x83: /* V9 fqtox */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
- break;
- case 0x84: /* V9 fxtos */
- gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
- break;
- case 0x88: /* V9 fxtod */
- gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
- break;
- case 0x8c: /* V9 fxtoq */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
- break;
-#endif
- default:
- goto illegal_insn;
- }
- } else if (xop == 0x35) { /* FPU Operations */
-#ifdef TARGET_SPARC64
- int cond;
-#endif
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_op_clear_ieee_excp_and_FTT();
- rs1 = GET_FIELD(insn, 13, 17);
- rs2 = GET_FIELD(insn, 27, 31);
- xop = GET_FIELD(insn, 18, 26);
+static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
+{
+ TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
+ DisasCompare cmp;
-#ifdef TARGET_SPARC64
-#define FMOVR(sz) \
- do { \
- DisasCompare cmp; \
- cond = GET_FIELD_SP(insn, 10, 12); \
- cpu_src1 = get_src1(dc, insn); \
- gen_compare_reg(&cmp, cond, cpu_src1); \
- gen_fmov##sz(dc, &cmp, rd, rs2); \
- } while (0)
-
- if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
- FMOVR(s);
- break;
- } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
- FMOVR(d);
- break;
- } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVR(q);
- break;
- }
-#undef FMOVR
-#endif
- switch (xop) {
-#ifdef TARGET_SPARC64
-#define FMOVCC(fcc, sz) \
- do { \
- DisasCompare cmp; \
- cond = GET_FIELD_SP(insn, 14, 17); \
- gen_fcompare(&cmp, fcc, cond); \
- gen_fmov##sz(dc, &cmp, rd, rs2); \
- } while (0)
-
- case 0x001: /* V9 fmovscc %fcc0 */
- FMOVCC(0, s);
- break;
- case 0x002: /* V9 fmovdcc %fcc0 */
- FMOVCC(0, d);
- break;
- case 0x003: /* V9 fmovqcc %fcc0 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(0, q);
- break;
- case 0x041: /* V9 fmovscc %fcc1 */
- FMOVCC(1, s);
- break;
- case 0x042: /* V9 fmovdcc %fcc1 */
- FMOVCC(1, d);
- break;
- case 0x043: /* V9 fmovqcc %fcc1 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(1, q);
- break;
- case 0x081: /* V9 fmovscc %fcc2 */
- FMOVCC(2, s);
- break;
- case 0x082: /* V9 fmovdcc %fcc2 */
- FMOVCC(2, d);
- break;
- case 0x083: /* V9 fmovqcc %fcc2 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(2, q);
- break;
- case 0x0c1: /* V9 fmovscc %fcc3 */
- FMOVCC(3, s);
- break;
- case 0x0c2: /* V9 fmovdcc %fcc3 */
- FMOVCC(3, d);
- break;
- case 0x0c3: /* V9 fmovqcc %fcc3 */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(3, q);
- break;
-#undef FMOVCC
-#define FMOVCC(xcc, sz) \
- do { \
- DisasCompare cmp; \
- cond = GET_FIELD_SP(insn, 14, 17); \
- gen_compare(&cmp, xcc, cond, dc); \
- gen_fmov##sz(dc, &cmp, rd, rs2); \
- } while (0)
-
- case 0x101: /* V9 fmovscc %icc */
- FMOVCC(0, s);
- break;
- case 0x102: /* V9 fmovdcc %icc */
- FMOVCC(0, d);
- break;
- case 0x103: /* V9 fmovqcc %icc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(0, q);
- break;
- case 0x181: /* V9 fmovscc %xcc */
- FMOVCC(1, s);
- break;
- case 0x182: /* V9 fmovdcc %xcc */
- FMOVCC(1, d);
- break;
- case 0x183: /* V9 fmovqcc %xcc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- FMOVCC(1, q);
- break;
-#undef FMOVCC
-#endif
- case 0x51: /* fcmps, V9 %fcc */
- cpu_src1_32 = gen_load_fpr_F(dc, rs1);
- cpu_src2_32 = gen_load_fpr_F(dc, rs2);
- gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
- break;
- case 0x52: /* fcmpd, V9 %fcc */
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
- break;
- case 0x53: /* fcmpq, V9 %fcc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_op_load_fpr_QT0(QFPREG(rs1));
- gen_op_load_fpr_QT1(QFPREG(rs2));
- gen_op_fcmpq(rd & 3);
- break;
- case 0x55: /* fcmpes, V9 %fcc */
- cpu_src1_32 = gen_load_fpr_F(dc, rs1);
- cpu_src2_32 = gen_load_fpr_F(dc, rs2);
- gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
- break;
- case 0x56: /* fcmped, V9 %fcc */
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
- break;
- case 0x57: /* fcmpeq, V9 %fcc */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_op_load_fpr_QT0(QFPREG(rs1));
- gen_op_load_fpr_QT1(QFPREG(rs2));
- gen_op_fcmpeq(rd & 3);
- break;
- default:
- goto illegal_insn;
- }
- } else if (xop == 0x2) {
- TCGv dst = gen_dest_gpr(dc, rd);
- rs1 = GET_FIELD(insn, 13, 17);
- if (rs1 == 0) {
- /* clr/mov shortcut : or %g0, x, y -> mov x, y */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_movi_tl(dst, simm);
- gen_store_gpr(dc, rd, dst);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2 == 0) {
- tcg_gen_movi_tl(dst, 0);
- gen_store_gpr(dc, rd, dst);
- } else {
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_store_gpr(dc, rd, cpu_src2);
- }
- }
- } else {
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_ori_tl(dst, cpu_src1, simm);
- gen_store_gpr(dc, rd, dst);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2 == 0) {
- /* mov shortcut: or x, %g0, y -> mov x, y */
- gen_store_gpr(dc, rd, cpu_src1);
- } else {
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, dst);
- }
- }
- }
-#ifdef TARGET_SPARC64
- } else if (xop == 0x25) { /* sll, V9 sllx */
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- if (insn & (1 << 12)) {
- tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
- } else {
- tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- cpu_tmp0 = tcg_temp_new();
- if (insn & (1 << 12)) {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- } else {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- }
- tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- } else if (xop == 0x26) { /* srl, V9 srlx */
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- if (insn & (1 << 12)) {
- tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
- } else {
- tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
- tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- cpu_tmp0 = tcg_temp_new();
- if (insn & (1 << 12)) {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
- } else {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
- tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
- }
- }
- gen_store_gpr(dc, rd, cpu_dst);
- } else if (xop == 0x27) { /* sra, V9 srax */
- cpu_src1 = get_src1(dc, insn);
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- if (insn & (1 << 12)) {
- tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
- } else {
- tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
- tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- cpu_tmp0 = tcg_temp_new();
- if (insn & (1 << 12)) {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
- tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
- } else {
- tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
- tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
- }
- }
- gen_store_gpr(dc, rd, cpu_dst);
-#endif
- } else if (xop < 0x36) {
- if (xop < 0x20) {
- cpu_src1 = get_src1(dc, insn);
- cpu_src2 = get_src2(dc, insn);
- switch (xop & ~0x10) {
- case 0x0: /* add */
- if (xop & 0x10) {
- gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
- dc->cc_op = CC_OP_ADD;
- } else {
- tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
- }
- break;
- case 0x1: /* and */
- tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x2: /* or */
- tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x3: /* xor */
- tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x4: /* sub */
- if (xop & 0x10) {
- gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
- dc->cc_op = CC_OP_SUB;
- } else {
- tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
- }
- break;
- case 0x5: /* andn */
- tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x6: /* orn */
- tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x7: /* xorn */
- tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0x8: /* addx, V9 addc */
- gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
- (xop & 0x10));
- break;
-#ifdef TARGET_SPARC64
- case 0x9: /* V9 mulx */
- tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
- break;
-#endif
- case 0xa: /* umul */
- CHECK_IU_FEATURE(dc, MUL);
- gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0xb: /* smul */
- CHECK_IU_FEATURE(dc, MUL);
- gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
- if (xop & 0x10) {
- tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
- dc->cc_op = CC_OP_LOGIC;
- }
- break;
- case 0xc: /* subx, V9 subc */
- gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
- (xop & 0x10));
- break;
-#ifdef TARGET_SPARC64
- case 0xd: /* V9 udivx */
- gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
- break;
-#endif
- case 0xe: /* udiv */
- CHECK_IU_FEATURE(dc, DIV);
- if (xop & 0x10) {
- gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- dc->cc_op = CC_OP_DIV;
- } else {
- gen_helper_udiv(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- }
- break;
- case 0xf: /* sdiv */
- CHECK_IU_FEATURE(dc, DIV);
- if (xop & 0x10) {
- gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- dc->cc_op = CC_OP_DIV;
- } else {
- gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1,
- cpu_src2);
- }
- break;
- default:
- goto illegal_insn;
- }
- gen_store_gpr(dc, rd, cpu_dst);
- } else {
- cpu_src1 = get_src1(dc, insn);
- cpu_src2 = get_src2(dc, insn);
- switch (xop) {
- case 0x20: /* taddcc */
- gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
- dc->cc_op = CC_OP_TADD;
- break;
- case 0x21: /* tsubcc */
- gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
- dc->cc_op = CC_OP_TSUB;
- break;
- case 0x22: /* taddcctv */
- gen_helper_taddcctv(cpu_dst, tcg_env,
- cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- dc->cc_op = CC_OP_TADDTV;
- break;
- case 0x23: /* tsubcctv */
- gen_helper_tsubcctv(cpu_dst, tcg_env,
- cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- dc->cc_op = CC_OP_TSUBTV;
- break;
- case 0x24: /* mulscc */
- update_psr(dc);
- gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
- dc->cc_op = CC_OP_ADD;
- break;
-#ifndef TARGET_SPARC64
- case 0x25: /* sll */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
- } else { /* register */
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x26: /* srl */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
- } else { /* register */
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x27: /* sra */
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 20, 31);
- tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
- } else { /* register */
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
- tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
- }
- gen_store_gpr(dc, rd, cpu_dst);
- break;
-#endif
- case 0x30:
- {
- cpu_tmp0 = tcg_temp_new();
- switch(rd) {
- case 0: /* wry */
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
- break;
-#ifndef TARGET_SPARC64
- case 0x01 ... 0x0f: /* undefined in the
- SPARCv8 manual, nop
- on the microSPARC
- II */
- case 0x10 ... 0x1f: /* implementation-dependent
- in the SPARCv8
- manual, nop on the
- microSPARC II */
- if ((rd == 0x13) && (dc->def->features &
- CPU_FEATURE_POWERDOWN)) {
- /* LEON3 power-down */
- save_state(dc);
- gen_helper_power_down(tcg_env);
- }
- break;
-#else
- case 0x2: /* V9 wrccr */
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_wrccr(tcg_env, cpu_tmp0);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
- dc->cc_op = CC_OP_FLAGS;
- break;
- case 0x3: /* V9 wrasi */
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, asi));
- /*
- * End TB to notice changed ASI.
- * TODO: Could notice src1 = %g0 and IS_IMM,
- * update DisasContext and not exit the TB.
- */
- save_state(dc);
- gen_op_next_insn();
- tcg_gen_lookup_and_goto_ptr();
- dc->base.is_jmp = DISAS_NORETURN;
- break;
- case 0x6: /* V9 wrfprs */
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
- dc->fprs_dirty = 0;
- save_state(dc);
- gen_op_next_insn();
- tcg_gen_exit_tb(NULL, 0);
- dc->base.is_jmp = DISAS_NORETURN;
- break;
- case 0xf: /* V9 sir, nop if user */
-#if !defined(CONFIG_USER_ONLY)
- if (supervisor(dc)) {
- ; // XXX
- }
-#endif
- break;
- case 0x13: /* Graphics Status */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
- break;
- case 0x14: /* Softint set */
- if (!supervisor(dc))
- goto illegal_insn;
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_set_softint(tcg_env, cpu_tmp0);
- break;
- case 0x15: /* Softint clear */
- if (!supervisor(dc))
- goto illegal_insn;
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_clear_softint(tcg_env, cpu_tmp0);
- break;
- case 0x16: /* Softint write */
- if (!supervisor(dc))
- goto illegal_insn;
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_write_softint(tcg_env, cpu_tmp0);
- break;
- case 0x17: /* Tick compare */
-#if !defined(CONFIG_USER_ONLY)
- if (!supervisor(dc))
- goto illegal_insn;
-#endif
- {
- TCGv_ptr r_tickptr;
-
- tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
- cpu_src2);
- r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, tick));
- translator_io_start(&dc->base);
- gen_helper_tick_set_limit(r_tickptr,
- cpu_tick_cmpr);
- /* End TB to handle timer interrupt */
- dc->base.is_jmp = DISAS_EXIT;
- }
- break;
- case 0x18: /* System tick */
-#if !defined(CONFIG_USER_ONLY)
- if (!supervisor(dc))
- goto illegal_insn;
-#endif
- {
- TCGv_ptr r_tickptr;
-
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
- cpu_src2);
- r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, stick));
- translator_io_start(&dc->base);
- gen_helper_tick_set_count(r_tickptr,
- cpu_tmp0);
- /* End TB to handle timer interrupt */
- dc->base.is_jmp = DISAS_EXIT;
- }
- break;
- case 0x19: /* System tick compare */
-#if !defined(CONFIG_USER_ONLY)
- if (!supervisor(dc))
- goto illegal_insn;
-#endif
- {
- TCGv_ptr r_tickptr;
-
- tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
- cpu_src2);
- r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, stick));
- translator_io_start(&dc->base);
- gen_helper_tick_set_limit(r_tickptr,
- cpu_stick_cmpr);
- /* End TB to handle timer interrupt */
- dc->base.is_jmp = DISAS_EXIT;
- }
- break;
-
- case 0x10: /* Performance Control */
- case 0x11: /* Performance Instrumentation
- Counter */
- case 0x12: /* Dispatch Control */
-#endif
- default:
- goto illegal_insn;
- }
- }
- break;
-#if !defined(CONFIG_USER_ONLY)
- case 0x31: /* wrpsr, V9 saved, restored */
- {
- if (!supervisor(dc))
- goto priv_insn;
-#ifdef TARGET_SPARC64
- switch (rd) {
- case 0:
- gen_helper_saved(tcg_env);
- break;
- case 1:
- gen_helper_restored(tcg_env);
- break;
- case 2: /* UA2005 allclean */
- case 3: /* UA2005 otherw */
- case 4: /* UA2005 normalw */
- case 5: /* UA2005 invalw */
- // XXX
- default:
- goto illegal_insn;
- }
-#else
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- gen_helper_wrpsr(tcg_env, cpu_tmp0);
- tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
- dc->cc_op = CC_OP_FLAGS;
- save_state(dc);
- gen_op_next_insn();
- tcg_gen_exit_tb(NULL, 0);
- dc->base.is_jmp = DISAS_NORETURN;
-#endif
- }
- break;
- case 0x32: /* wrwim, V9 wrpr */
- {
- if (!supervisor(dc))
- goto priv_insn;
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
-#ifdef TARGET_SPARC64
- switch (rd) {
- case 0: // tpc
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_st_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tpc));
- }
- break;
- case 1: // tnpc
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_st_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tnpc));
- }
- break;
- case 2: // tstate
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_st_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state,
- tstate));
- }
- break;
- case 3: // tt
- {
- TCGv_ptr r_tsptr;
-
- r_tsptr = tcg_temp_new_ptr();
- gen_load_trap_state_at_tl(r_tsptr, tcg_env);
- tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
- offsetof(trap_state, tt));
- }
- break;
- case 4: // tick
- {
- TCGv_ptr r_tickptr;
-
- r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, tick));
- translator_io_start(&dc->base);
- gen_helper_tick_set_count(r_tickptr,
- cpu_tmp0);
- /* End TB to handle timer interrupt */
- dc->base.is_jmp = DISAS_EXIT;
- }
- break;
- case 5: // tba
- tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
- break;
- case 6: // pstate
- save_state(dc);
- if (translator_io_start(&dc->base)) {
- dc->base.is_jmp = DISAS_EXIT;
- }
- gen_helper_wrpstate(tcg_env, cpu_tmp0);
- dc->npc = DYNAMIC_PC;
- break;
- case 7: // tl
- save_state(dc);
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState, tl));
- dc->npc = DYNAMIC_PC;
- break;
- case 8: // pil
- if (translator_io_start(&dc->base)) {
- dc->base.is_jmp = DISAS_EXIT;
- }
- gen_helper_wrpil(tcg_env, cpu_tmp0);
- break;
- case 9: // cwp
- gen_helper_wrcwp(tcg_env, cpu_tmp0);
- break;
- case 10: // cansave
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState,
- cansave));
- break;
- case 11: // canrestore
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState,
- canrestore));
- break;
- case 12: // cleanwin
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState,
- cleanwin));
- break;
- case 13: // otherwin
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState,
- otherwin));
- break;
- case 14: // wstate
- tcg_gen_st32_tl(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState,
- wstate));
- break;
- case 16: // UA2005 gl
- CHECK_IU_FEATURE(dc, GL);
- gen_helper_wrgl(tcg_env, cpu_tmp0);
- break;
- case 26: // UA2005 strand status
- CHECK_IU_FEATURE(dc, HYPV);
- if (!hypervisor(dc))
- goto priv_insn;
- tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
- break;
- default:
- goto illegal_insn;
- }
-#else
- tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
- if (dc->def->nwindows != 32) {
- tcg_gen_andi_tl(cpu_wim, cpu_wim,
- (1 << dc->def->nwindows) - 1);
- }
-#endif
- }
- break;
- case 0x33: /* wrtbr, UA2005 wrhpr */
- {
-#ifndef TARGET_SPARC64
- if (!supervisor(dc))
- goto priv_insn;
- tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
-#else
- CHECK_IU_FEATURE(dc, HYPV);
- if (!hypervisor(dc))
- goto priv_insn;
- cpu_tmp0 = tcg_temp_new();
- tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
- switch (rd) {
- case 0: // hpstate
- tcg_gen_st_i64(cpu_tmp0, tcg_env,
- offsetof(CPUSPARCState,
- hpstate));
- save_state(dc);
- gen_op_next_insn();
- tcg_gen_exit_tb(NULL, 0);
- dc->base.is_jmp = DISAS_NORETURN;
- break;
- case 1: // htstate
- // XXX gen_op_wrhtstate();
- break;
- case 3: // hintp
- tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
- break;
- case 5: // htba
- tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
- break;
- case 31: // hstick_cmpr
- {
- TCGv_ptr r_tickptr;
-
- tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
- r_tickptr = tcg_temp_new_ptr();
- tcg_gen_ld_ptr(r_tickptr, tcg_env,
- offsetof(CPUSPARCState, hstick));
- translator_io_start(&dc->base);
- gen_helper_tick_set_limit(r_tickptr,
- cpu_hstick_cmpr);
- /* End TB to handle timer interrupt */
- dc->base.is_jmp = DISAS_EXIT;
- }
- break;
- case 6: // hver readonly
- default:
- goto illegal_insn;
- }
-#endif
- }
- break;
-#endif
-#ifdef TARGET_SPARC64
- case 0x2c: /* V9 movcc */
- {
- int cc = GET_FIELD_SP(insn, 11, 12);
- int cond = GET_FIELD_SP(insn, 14, 17);
- DisasCompare cmp;
- TCGv dst;
-
- if (insn & (1 << 18)) {
- if (cc == 0) {
- gen_compare(&cmp, 0, cond, dc);
- } else if (cc == 2) {
- gen_compare(&cmp, 1, cond, dc);
- } else {
- goto illegal_insn;
- }
- } else {
- gen_fcompare(&cmp, cc, cond);
- }
-
- /* The get_src2 above loaded the normal 13-bit
- immediate field, not the 11-bit field we have
- in movcc. But it did handle the reg case. */
- if (IS_IMM) {
- simm = GET_FIELD_SPs(insn, 0, 10);
- tcg_gen_movi_tl(cpu_src2, simm);
- }
-
- dst = gen_load_gpr(dc, rd);
- tcg_gen_movcond_tl(cmp.cond, dst,
- cmp.c1, cmp.c2,
- cpu_src2, dst);
- gen_store_gpr(dc, rd, dst);
- break;
- }
- case 0x2d: /* V9 sdivx */
- gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x2e: /* V9 popc */
- tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x2f: /* V9 movr */
- {
- int cond = GET_FIELD_SP(insn, 10, 12);
- DisasCompare cmp;
- TCGv dst;
-
- gen_compare_reg(&cmp, cond, cpu_src1);
-
- /* The get_src2 above loaded the normal 13-bit
- immediate field, not the 10-bit field we have
- in movr. But it did handle the reg case. */
- if (IS_IMM) {
- simm = GET_FIELD_SPs(insn, 0, 9);
- tcg_gen_movi_tl(cpu_src2, simm);
- }
-
- dst = gen_load_gpr(dc, rd);
- tcg_gen_movcond_tl(cmp.cond, dst,
- cmp.c1, cmp.c2,
- cpu_src2, dst);
- gen_store_gpr(dc, rd, dst);
- break;
- }
-#endif
- default:
- goto illegal_insn;
- }
- }
- } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
-#ifdef TARGET_SPARC64
- int opf = GET_FIELD_SP(insn, 5, 13);
- rs1 = GET_FIELD(insn, 13, 17);
- rs2 = GET_FIELD(insn, 27, 31);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
+ if (src2 == NULL) {
+ return false;
+ }
+ gen_compare(&cmp, a->cc, a->cond, dc);
+ return do_mov_cond(dc, &cmp, a->rd, src2);
+}
- switch (opf) {
- case 0x000: /* VIS I edge8cc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x001: /* VIS II edge8n */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x002: /* VIS I edge8lcc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x003: /* VIS II edge8ln */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x004: /* VIS I edge16cc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x005: /* VIS II edge16n */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x006: /* VIS I edge16lcc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x007: /* VIS II edge16ln */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x008: /* VIS I edge32cc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x009: /* VIS II edge32n */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x00a: /* VIS I edge32lcc */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x00b: /* VIS II edge32ln */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x010: /* VIS I array8 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x012: /* VIS I array16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x014: /* VIS I array32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x018: /* VIS I alignaddr */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x01a: /* VIS I alignaddrl */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x019: /* VIS II bmask */
- CHECK_FPU_FEATURE(dc, VIS2);
- cpu_src1 = gen_load_gpr(dc, rs1);
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x020: /* VIS I fcmple16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x022: /* VIS I fcmpne16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x024: /* VIS I fcmple32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x026: /* VIS I fcmpne32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x028: /* VIS I fcmpgt16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x02a: /* VIS I fcmpeq16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x02c: /* VIS I fcmpgt32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x02e: /* VIS I fcmpeq32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- cpu_src2_64 = gen_load_fpr_D(dc, rs2);
- gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
- gen_store_gpr(dc, rd, cpu_dst);
- break;
- case 0x031: /* VIS I fmul8x16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
- break;
- case 0x033: /* VIS I fmul8x16au */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
- break;
- case 0x035: /* VIS I fmul8x16al */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
- break;
- case 0x036: /* VIS I fmul8sux16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
- break;
- case 0x037: /* VIS I fmul8ulx16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
- break;
- case 0x038: /* VIS I fmuld8sux16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
- break;
- case 0x039: /* VIS I fmuld8ulx16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
- break;
- case 0x03a: /* VIS I fpack32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
- break;
- case 0x03b: /* VIS I fpack16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x03d: /* VIS I fpackfix */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x03e: /* VIS I pdist */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
- break;
- case 0x048: /* VIS I faligndata */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
- break;
- case 0x04b: /* VIS I fpmerge */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
- break;
- case 0x04c: /* VIS II bshuffle */
- CHECK_FPU_FEATURE(dc, VIS2);
- gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
- break;
- case 0x04d: /* VIS I fexpand */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
- break;
- case 0x050: /* VIS I fpadd16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
- break;
- case 0x051: /* VIS I fpadd16s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
- break;
- case 0x052: /* VIS I fpadd32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
- break;
- case 0x053: /* VIS I fpadd32s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
- break;
- case 0x054: /* VIS I fpsub16 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
- break;
- case 0x055: /* VIS I fpsub16s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
- break;
- case 0x056: /* VIS I fpsub32 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
- break;
- case 0x057: /* VIS I fpsub32s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
- break;
- case 0x060: /* VIS I fzero */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_64 = gen_dest_fpr_D(dc, rd);
- tcg_gen_movi_i64(cpu_dst_64, 0);
- gen_store_fpr_D(dc, rd, cpu_dst_64);
- break;
- case 0x061: /* VIS I fzeros */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- tcg_gen_movi_i32(cpu_dst_32, 0);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x062: /* VIS I fnor */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
- break;
- case 0x063: /* VIS I fnors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
- break;
- case 0x064: /* VIS I fandnot2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
- break;
- case 0x065: /* VIS I fandnot2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
- break;
- case 0x066: /* VIS I fnot2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
- break;
- case 0x067: /* VIS I fnot2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
- break;
- case 0x068: /* VIS I fandnot1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
- break;
- case 0x069: /* VIS I fandnot1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
- break;
- case 0x06a: /* VIS I fnot1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
- break;
- case 0x06b: /* VIS I fnot1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
- break;
- case 0x06c: /* VIS I fxor */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
- break;
- case 0x06d: /* VIS I fxors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
- break;
- case 0x06e: /* VIS I fnand */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
- break;
- case 0x06f: /* VIS I fnands */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
- break;
- case 0x070: /* VIS I fand */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
- break;
- case 0x071: /* VIS I fands */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
- break;
- case 0x072: /* VIS I fxnor */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
- break;
- case 0x073: /* VIS I fxnors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
- break;
- case 0x074: /* VIS I fsrc1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs1);
- gen_store_fpr_D(dc, rd, cpu_src1_64);
- break;
- case 0x075: /* VIS I fsrc1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_32 = gen_load_fpr_F(dc, rs1);
- gen_store_fpr_F(dc, rd, cpu_src1_32);
- break;
- case 0x076: /* VIS I fornot2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
- break;
- case 0x077: /* VIS I fornot2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
- break;
- case 0x078: /* VIS I fsrc2 */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_64 = gen_load_fpr_D(dc, rs2);
- gen_store_fpr_D(dc, rd, cpu_src1_64);
- break;
- case 0x079: /* VIS I fsrc2s */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_src1_32 = gen_load_fpr_F(dc, rs2);
- gen_store_fpr_F(dc, rd, cpu_src1_32);
- break;
- case 0x07a: /* VIS I fornot1 */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
- break;
- case 0x07b: /* VIS I fornot1s */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
- break;
- case 0x07c: /* VIS I for */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
- break;
- case 0x07d: /* VIS I fors */
- CHECK_FPU_FEATURE(dc, VIS1);
- gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
- break;
- case 0x07e: /* VIS I fone */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_64 = gen_dest_fpr_D(dc, rd);
- tcg_gen_movi_i64(cpu_dst_64, -1);
- gen_store_fpr_D(dc, rd, cpu_dst_64);
- break;
- case 0x07f: /* VIS I fones */
- CHECK_FPU_FEATURE(dc, VIS1);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- tcg_gen_movi_i32(cpu_dst_32, -1);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x080: /* VIS I shutdown */
- case 0x081: /* VIS II siam */
- // XXX
- goto illegal_insn;
- default:
- goto illegal_insn;
- }
-#else
- goto ncp_insn;
-#endif
- } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
-#ifdef TARGET_SPARC64
- goto illegal_insn;
-#else
- goto ncp_insn;
-#endif
-#ifdef TARGET_SPARC64
- } else if (xop == 0x39) { /* V9 return */
- save_state(dc);
- cpu_src1 = get_src1(dc, insn);
- cpu_tmp0 = tcg_temp_new();
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2) {
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
- } else {
- tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
- }
- }
- gen_helper_restore(tcg_env);
- gen_mov_pc_npc(dc);
- gen_check_align(cpu_tmp0, 3);
- tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC_LOOKUP;
- goto jmp_insn;
-#endif
- } else {
- cpu_src1 = get_src1(dc, insn);
- cpu_tmp0 = tcg_temp_new();
- if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2) {
- cpu_src2 = gen_load_gpr(dc, rs2);
- tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
- } else {
- tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
- }
- }
- switch (xop) {
- case 0x38: /* jmpl */
- {
- TCGv t = gen_dest_gpr(dc, rd);
- tcg_gen_movi_tl(t, dc->pc);
- gen_store_gpr(dc, rd, t);
-
- gen_mov_pc_npc(dc);
- gen_check_align(cpu_tmp0, 3);
- gen_address_mask(dc, cpu_tmp0);
- tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC_LOOKUP;
- }
- goto jmp_insn;
-#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
- case 0x39: /* rett, V9 return */
- {
- if (!supervisor(dc))
- goto priv_insn;
- gen_mov_pc_npc(dc);
- gen_check_align(cpu_tmp0, 3);
- tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
- dc->npc = DYNAMIC_PC;
- gen_helper_rett(tcg_env);
- }
- goto jmp_insn;
-#endif
- case 0x3b: /* flush */
- if (!((dc)->def->features & CPU_FEATURE_FLUSH))
- goto unimp_flush;
- /* nop */
- break;
- case 0x3c: /* save */
- gen_helper_save(tcg_env);
- gen_store_gpr(dc, rd, cpu_tmp0);
- break;
- case 0x3d: /* restore */
- gen_helper_restore(tcg_env);
- gen_store_gpr(dc, rd, cpu_tmp0);
- break;
-#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
- case 0x3e: /* V9 done/retry */
- {
- switch (rd) {
- case 0:
- if (!supervisor(dc))
- goto priv_insn;
- dc->npc = DYNAMIC_PC;
- dc->pc = DYNAMIC_PC;
- translator_io_start(&dc->base);
- gen_helper_done(tcg_env);
- goto jmp_insn;
- case 1:
- if (!supervisor(dc))
- goto priv_insn;
- dc->npc = DYNAMIC_PC;
- dc->pc = DYNAMIC_PC;
- translator_io_start(&dc->base);
- gen_helper_retry(tcg_env);
- goto jmp_insn;
- default:
- goto illegal_insn;
- }
- }
- break;
-#endif
- default:
- goto illegal_insn;
- }
- }
- break;
- }
- break;
- case 3: /* load/store instructions */
- {
- unsigned int xop = GET_FIELD(insn, 7, 12);
- /* ??? gen_address_mask prevents us from using a source
- register directly. Always generate a temporary. */
- TCGv cpu_addr = tcg_temp_new();
-
- tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
- if (xop == 0x3c || xop == 0x3e) {
- /* V9 casa/casxa : no offset */
- } else if (IS_IMM) { /* immediate */
- simm = GET_FIELDs(insn, 19, 31);
- if (simm != 0) {
- tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
- }
- } else { /* register */
- rs2 = GET_FIELD(insn, 27, 31);
- if (rs2 != 0) {
- tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
- }
- }
- if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
- (xop > 0x17 && xop <= 0x1d ) ||
- (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
- TCGv cpu_val = gen_dest_gpr(dc, rd);
-
- switch (xop) {
- case 0x0: /* ld, V9 lduw, load unsigned word */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- break;
- case 0x1: /* ldub, load unsigned byte */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_UB);
- break;
- case 0x2: /* lduh, load unsigned halfword */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUW | MO_ALIGN);
- break;
- case 0x3: /* ldd, load double word */
- if (rd & 1)
- goto illegal_insn;
- else {
- TCGv_i64 t64;
-
- gen_address_mask(dc, cpu_addr);
- t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- tcg_gen_trunc_i64_tl(cpu_val, t64);
- tcg_gen_ext32u_tl(cpu_val, cpu_val);
- gen_store_gpr(dc, rd + 1, cpu_val);
- tcg_gen_shri_i64(t64, t64, 32);
- tcg_gen_trunc_i64_tl(cpu_val, t64);
- tcg_gen_ext32u_tl(cpu_val, cpu_val);
- }
- break;
- case 0x9: /* ldsb, load signed byte */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
- break;
- case 0xa: /* ldsh, load signed halfword */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TESW | MO_ALIGN);
- break;
- case 0xd: /* ldstub */
- gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
- break;
- case 0x0f:
- /* swap, swap register with memory. Also atomically */
- CHECK_IU_FEATURE(dc, SWAP);
- cpu_src1 = gen_load_gpr(dc, rd);
- gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
- dc->mem_idx, MO_TEUL);
- break;
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- case 0x10: /* lda, V9 lduwa, load word alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
- break;
- case 0x11: /* lduba, load unsigned byte alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
- break;
- case 0x12: /* lduha, load unsigned halfword alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
- break;
- case 0x13: /* ldda, load double word alternate */
- if (rd & 1) {
- goto illegal_insn;
- }
- gen_ldda_asi(dc, cpu_addr, insn, rd);
- goto skip_move;
- case 0x19: /* ldsba, load signed byte alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
- break;
- case 0x1a: /* ldsha, load signed halfword alternate */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
- break;
- case 0x1d: /* ldstuba -- XXX: should be atomically */
- gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
- break;
- case 0x1f: /* swapa, swap reg with alt. memory. Also
- atomically */
- CHECK_IU_FEATURE(dc, SWAP);
- cpu_src1 = gen_load_gpr(dc, rd);
- gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
- break;
+static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
+{
+ TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
+ DisasCompare cmp;
-#ifndef TARGET_SPARC64
- case 0x30: /* ldc */
- case 0x31: /* ldcsr */
- case 0x33: /* lddc */
- goto ncp_insn;
-#endif
-#endif
-#ifdef TARGET_SPARC64
- case 0x08: /* V9 ldsw */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TESL | MO_ALIGN);
- break;
- case 0x0b: /* V9 ldx */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- break;
- case 0x18: /* V9 ldswa */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
- break;
- case 0x1b: /* V9 ldxa */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
- break;
- case 0x2d: /* V9 prefetch, no effect */
- goto skip_move;
- case 0x30: /* V9 ldfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
- gen_update_fprs_dirty(dc, rd);
- goto skip_move;
- case 0x33: /* V9 lddfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
- gen_update_fprs_dirty(dc, DFPREG(rd));
- goto skip_move;
- case 0x3d: /* V9 prefetcha, no effect */
- goto skip_move;
- case 0x32: /* V9 ldqfa */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
- gen_update_fprs_dirty(dc, QFPREG(rd));
- goto skip_move;
-#endif
- default:
- goto illegal_insn;
- }
- gen_store_gpr(dc, rd, cpu_val);
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- skip_move: ;
-#endif
- } else if (xop >= 0x20 && xop < 0x24) {
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- switch (xop) {
- case 0x20: /* ldf, load fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_dst_32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- gen_store_fpr_F(dc, rd, cpu_dst_32);
- break;
- case 0x21: /* ldfsr, V9 ldxfsr */
-#ifdef TARGET_SPARC64
- gen_address_mask(dc, cpu_addr);
- if (rd == 1) {
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
- break;
- }
-#endif
- cpu_dst_32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
- break;
- case 0x22: /* ldqf, load quad fpreg */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_address_mask(dc, cpu_addr);
- cpu_src1_64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
- cpu_src2_64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
- break;
- case 0x23: /* lddf, load double fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_dst_64 = gen_dest_fpr_D(dc, rd);
- tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- gen_store_fpr_D(dc, rd, cpu_dst_64);
- break;
- default:
- goto illegal_insn;
- }
- } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
- xop == 0xe || xop == 0x1e) {
- TCGv cpu_val = gen_load_gpr(dc, rd);
-
- switch (xop) {
- case 0x4: /* st, store word */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- break;
- case 0x5: /* stb, store byte */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
- break;
- case 0x6: /* sth, store halfword */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUW | MO_ALIGN);
- break;
- case 0x7: /* std, store double word */
- if (rd & 1)
- goto illegal_insn;
- else {
- TCGv_i64 t64;
- TCGv lo;
-
- gen_address_mask(dc, cpu_addr);
- lo = gen_load_gpr(dc, rd + 1);
- t64 = tcg_temp_new_i64();
- tcg_gen_concat_tl_i64(t64, lo, cpu_val);
- tcg_gen_qemu_st_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- }
- break;
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- case 0x14: /* sta, V9 stwa, store word alternate */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
- break;
- case 0x15: /* stba, store byte alternate */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
- break;
- case 0x16: /* stha, store halfword alternate */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
- break;
- case 0x17: /* stda, store double word alternate */
- if (rd & 1) {
- goto illegal_insn;
- }
- gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
- break;
-#endif
-#ifdef TARGET_SPARC64
- case 0x0e: /* V9 stx */
- gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- break;
- case 0x1e: /* V9 stxa */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
- break;
-#endif
- default:
- goto illegal_insn;
- }
- } else if (xop > 0x23 && xop < 0x28) {
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- switch (xop) {
- case 0x24: /* stf, store fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_src1_32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- break;
- case 0x25: /* stfsr, V9 stxfsr */
- {
-#ifdef TARGET_SPARC64
- gen_address_mask(dc, cpu_addr);
- if (rd == 1) {
- tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN);
- break;
- }
-#endif
- tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
- dc->mem_idx, MO_TEUL | MO_ALIGN);
- }
- break;
- case 0x26:
-#ifdef TARGET_SPARC64
- /* V9 stqf, store quad fpreg */
- CHECK_FPU_FEATURE(dc, FLOAT128);
- gen_address_mask(dc, cpu_addr);
- /* ??? While stqf only requires 4-byte alignment, it is
- legal for the cpu to signal the unaligned exception.
- The OS trap handler is then required to fix it up.
- For qemu, this avoids having to probe the second page
- before performing the first write. */
- cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
- tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
- tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
- cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
- tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEUQ);
- break;
-#else /* !TARGET_SPARC64 */
- /* stdfq, store floating point queue */
-#if defined(CONFIG_USER_ONLY)
- goto illegal_insn;
-#else
- if (!supervisor(dc))
- goto priv_insn;
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- goto nfq_insn;
-#endif
-#endif
- case 0x27: /* stdf, store double fpreg */
- gen_address_mask(dc, cpu_addr);
- cpu_src1_64 = gen_load_fpr_D(dc, rd);
- tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEUQ | MO_ALIGN_4);
- break;
- default:
- goto illegal_insn;
- }
- } else if (xop > 0x33 && xop < 0x3f) {
- switch (xop) {
-#ifdef TARGET_SPARC64
- case 0x34: /* V9 stfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_stf_asi(dc, cpu_addr, insn, 4, rd);
- break;
- case 0x36: /* V9 stqfa */
- {
- CHECK_FPU_FEATURE(dc, FLOAT128);
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
- }
- break;
- case 0x37: /* V9 stdfa */
- if (gen_trap_ifnofpu(dc)) {
- goto jmp_insn;
- }
- gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
- break;
- case 0x3e: /* V9 casxa */
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
- break;
-#else
- case 0x34: /* stc */
- case 0x35: /* stcsr */
- case 0x36: /* stdcq */
- case 0x37: /* stdc */
- goto ncp_insn;
-#endif
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
- case 0x3c: /* V9 or LEON3 casa */
-#ifndef TARGET_SPARC64
- CHECK_IU_FEATURE(dc, CASA);
-#endif
- rs2 = GET_FIELD(insn, 27, 31);
- cpu_src2 = gen_load_gpr(dc, rs2);
- gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
- break;
-#endif
- default:
- goto illegal_insn;
- }
- } else {
- goto illegal_insn;
- }
+ if (src2 == NULL) {
+ return false;
+ }
+ gen_fcompare(&cmp, a->cc, a->cond);
+ return do_mov_cond(dc, &cmp, a->rd, src2);
+}
+
+static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
+{
+ TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
+ DisasCompare cmp;
+
+ if (src2 == NULL) {
+ return false;
+ }
+ gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
+ return do_mov_cond(dc, &cmp, a->rd, src2);
+}
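
do_mov_cond is defined earlier in the file, outside this hunk. Assuming DisasCompare carries a TCG condition plus two comparison operands (the c1/c2 field names are an assumption here), its core is presumably a single conditional move:

    /* Sketch only: cmp->cond/c1/c2 are assumed field names. */
    static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
    {
        TCGv dst = gen_load_gpr(dc, rd);    /* old value kept when cond is false */

        tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
        gen_store_gpr(dc, rd, dst);
        return advance_pc(dc);
    }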
+
+static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
+ bool (*func)(DisasContext *dc, int rd, TCGv src))
+{
+ TCGv src1, sum;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!a->imm && a->rs2_or_imm & ~0x1f) {
+ return false;
+ }
+
+ /*
+ * Always load the sum into a new temporary.
+ * This is required to capture the value across a window change,
+ * e.g. SAVE and RESTORE: reading the source registers after the
+ * window has moved would yield values from the new window.
+ */
+ sum = tcg_temp_new();
+ src1 = gen_load_gpr(dc, a->rs1);
+ if (a->imm || a->rs2_or_imm == 0) {
+ tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
+ } else {
+ tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
+ }
+ return func(dc, a->rd, sum);
+}
+
+static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
+{
+ /*
+ * Preserve pc across advance, so that we can delay
+ * the writeback to rd until after src is consumed.
+ */
+ target_ulong cur_pc = dc->pc;
+
+ gen_check_align(dc, src, 3);
+
+ gen_mov_pc_npc(dc);
+ tcg_gen_mov_tl(cpu_npc, src);
+ gen_address_mask(dc, cpu_npc);
+ gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
+
+ dc->npc = DYNAMIC_PC_LOOKUP;
+ return true;
+}
+
+TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
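
TRANS is the glue between the decodetree-generated decoder and these shared helpers. Assuming the usual shape of that macro in this conversion (its definition is outside this hunk), the line above expands to roughly:

    /* Assumed macro shape; avail_ALL/avail_32/avail_64 gate by CPU variant. */
    #define TRANS(NAME, AVAIL, FUNC, ...) \
        static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
        { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

    /* So TRANS(JMPL, ALL, do_add_special, a, do_jmpl) yields: */
    static bool trans_JMPL(DisasContext *dc, arg_JMPL *a)
    {
        return avail_ALL(dc) && do_add_special(dc, a, do_jmpl);
    }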
+
+static bool do_rett(DisasContext *dc, int rd, TCGv src)
+{
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+
+ gen_check_align(dc, src, 3);
+
+ gen_mov_pc_npc(dc);
+ tcg_gen_mov_tl(cpu_npc, src);
+ gen_helper_rett(tcg_env);
+
+ dc->npc = DYNAMIC_PC;
+ return true;
+}
+
+TRANS(RETT, 32, do_add_special, a, do_rett)
+
+static bool do_return(DisasContext *dc, int rd, TCGv src)
+{
+ gen_check_align(dc, src, 3);
+
+ gen_mov_pc_npc(dc);
+ tcg_gen_mov_tl(cpu_npc, src);
+ gen_address_mask(dc, cpu_npc);
+
+ gen_helper_restore(tcg_env);
+ dc->npc = DYNAMIC_PC_LOOKUP;
+ return true;
+}
+
+TRANS(RETURN, 64, do_add_special, a, do_return)
+
+static bool do_save(DisasContext *dc, int rd, TCGv src)
+{
+ gen_helper_save(tcg_env);
+ gen_store_gpr(dc, rd, src);
+ return advance_pc(dc);
+}
+
+TRANS(SAVE, ALL, do_add_special, a, do_save)
+
+static bool do_restore(DisasContext *dc, int rd, TCGv src)
+{
+ gen_helper_restore(tcg_env);
+ gen_store_gpr(dc, rd, src);
+ return advance_pc(dc);
+}
+
+TRANS(RESTORE, ALL, do_add_special, a, do_restore)
+
+static bool do_done_retry(DisasContext *dc, bool done)
+{
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ translator_io_start(&dc->base);
+ if (done) {
+ gen_helper_done(tcg_env);
+ } else {
+ gen_helper_retry(tcg_env);
+ }
+ return true;
+}
+
+TRANS(DONE, 64, do_done_retry, true)
+TRANS(RETRY, 64, do_done_retry, false)
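
For reference, the architectural effect implemented by the two helpers (SPARC V9; the trap-stack details live in the helpers themselves):

    /*
     * DONE:  pc = TNPC[TL]; npc = TNPC[TL] + 4;   -- skip the trapped insn
     * RETRY: pc = TPC[TL];  npc = TNPC[TL];       -- re-execute it
     * Both then pop the trap level, which is why pc/npc become dynamic here.
     */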
+
+/*
+ * Major opcode 11 -- load and store instructions
+ */
+
+static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
+{
+ TCGv addr, tmp = NULL;
+
+ /* For simplicity, we under-decoded the rs2 form. */
+ if (!imm && rs2_or_imm & ~0x1f) {
+ return NULL;
+ }
+
+ addr = gen_load_gpr(dc, rs1);
+ if (rs2_or_imm) {
+ tmp = tcg_temp_new();
+ if (imm) {
+ tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
+ } else {
+ tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
}
- break;
+ addr = tmp;
}
- /* default case for non jump instructions */
- if (dc->npc & 3) {
- switch (dc->npc) {
- case DYNAMIC_PC:
- case DYNAMIC_PC_LOOKUP:
- dc->pc = dc->npc;
- gen_op_next_insn();
- break;
- case JUMP_PC:
- /* we can do a static jump */
- gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
- dc->base.is_jmp = DISAS_NORETURN;
- break;
- default:
- g_assert_not_reached();
+ if (AM_CHECK(dc)) {
+ if (!tmp) {
+ tmp = tcg_temp_new();
}
- } else {
- dc->pc = dc->npc;
- dc->npc = dc->npc + 4;
+ tcg_gen_ext32u_tl(tmp, addr);
+ addr = tmp;
+ }
+ return addr;
+}
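
In plain C, the effective address this emits is, given the under-decoded rs2 form noted above:

    /* Sketch of the emitted computation. */
    target_ulong ea = regs[rs1] + (imm ? (target_long)simm13 : regs[rs2_or_imm]);
    if (AM_CHECK(dc)) {         /* e.g. a 64-bit CPU running with PSTATE.AM set */
        ea = (uint32_t)ea;      /* addresses are truncated to 32 bits */
    }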
+
+static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
+{
+ TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, mop);
+
+ reg = gen_dest_gpr(dc, a->rd);
+ gen_ld_asi(dc, &da, reg, addr);
+ gen_store_gpr(dc, a->rd, reg);
+ return advance_pc(dc);
+}
+
+TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
+TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
+TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
+TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
+TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
+TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
+TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
+
+static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
+{
+ TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, mop);
+
+ reg = gen_load_gpr(dc, a->rd);
+ gen_st_asi(dc, &da, reg, addr);
+ return advance_pc(dc);
+}
+
+TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
+TRANS(STB, ALL, do_st_gpr, a, MO_UB)
+TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
+TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
+
+static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr;
+ DisasASI da;
+
+ if (a->rd & 1) {
+ return false;
+ }
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_TEUQ);
+ gen_ldda_asi(dc, &da, addr, a->rd);
+ return advance_pc(dc);
+}
+
+static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr;
+ DisasASI da;
+
+ if (a->rd & 1) {
+ return false;
+ }
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_TEUQ);
+ gen_stda_asi(dc, &da, addr, a->rd);
+ return advance_pc(dc);
+}
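
LDD/STD move a 64-bit quantity through an even/odd register pair, which is what the a->rd & 1 rejection above enforces. A sketch of the big-endian split performed inside gen_ldda_asi/gen_stda_asi:

    /*
     * ldd: r[rd]   = mem[ea .. ea+3];     high word
     *      r[rd+1] = mem[ea+4 .. ea+7];   low word
     * std stores the same pair back in that order.
     */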
+
+static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr, reg;
+ DisasASI da;
+
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_UB);
+
+ reg = gen_dest_gpr(dc, a->rd);
+ gen_ldstub_asi(dc, &da, reg, addr);
+ gen_store_gpr(dc, a->rd, reg);
+ return advance_pc(dc);
+}
+
+static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
+{
+ TCGv addr, dst, src;
+ DisasASI da;
+
+ addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, MO_TEUL);
+
+ dst = gen_dest_gpr(dc, a->rd);
+ src = gen_load_gpr(dc, a->rd);
+ gen_swap_asi(dc, &da, dst, src, addr);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
+{
+ TCGv addr, o, n, c;
+ DisasASI da;
+
+ addr = gen_ldst_addr(dc, a->rs1, true, 0);
+ if (addr == NULL) {
+ return false;
+ }
+ da = resolve_asi(dc, a->asi, mop);
+
+ o = gen_dest_gpr(dc, a->rd);
+ n = gen_load_gpr(dc, a->rd);
+ c = gen_load_gpr(dc, a->rs2_or_imm);
+ gen_cas_asi(dc, &da, o, n, c, addr);
+ gen_store_gpr(dc, a->rd, o);
+ return advance_pc(dc);
+}
+
+TRANS(CASA, CASA, do_casa, a, MO_TEUL)
+TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
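
gen_cas_asi is outside this hunk; for a normal ASI its core is presumably the TCG atomic compare-and-swap, along these lines (mem_idx/mop standing in for whatever the resolved DisasASI carries):

    /* Sketch: compare mem[addr] with c; if equal, store n; o = old value. */
    tcg_gen_atomic_cmpxchg_tl(o, addr, c, n, mem_idx, mop | MO_ALIGN);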
+
+static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
+{
+ TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (sz == MO_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+ da = resolve_asi(dc, a->asi, MO_TE | sz);
+ gen_ldf_asi(dc, &da, sz, addr, a->rd);
+ gen_update_fprs_dirty(dc, a->rd);
+ return advance_pc(dc);
+}
+
+TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
+TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
+TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
+
+TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
+TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
+TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
+
+static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
+{
+ TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ DisasASI da;
+
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (sz == MO_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+ da = resolve_asi(dc, a->asi, MO_TE | sz);
+ gen_stf_asi(dc, &da, sz, addr, a->rd);
+ return advance_pc(dc);
+}
+
+TRANS(STF, ALL, do_st_fpr, a, MO_32)
+TRANS(STDF, ALL, do_st_fpr, a, MO_64)
+TRANS(STQF, ALL, do_st_fpr, a, MO_128)
+
+TRANS(STFA, 64, do_st_fpr, a, MO_32)
+TRANS(STDFA, 64, do_st_fpr, a, MO_64)
+TRANS(STQFA, 64, do_st_fpr, a, MO_128)
+
+static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
+{
+ if (!avail_32(dc)) {
+ return false;
+ }
+ if (!supervisor(dc)) {
+ return raise_priv(dc);
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
}
- jmp_insn:
- return;
- illegal_insn:
- gen_exception(dc, TT_ILL_INSN);
- return;
- unimp_flush:
- gen_exception(dc, TT_UNIMP_FLUSH);
- return;
-#if !defined(CONFIG_USER_ONLY)
- priv_insn:
- gen_exception(dc, TT_PRIV_INSN);
- return;
-#endif
- nfpu_insn:
- gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
- return;
-#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
- nfq_insn:
gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
- return;
-#endif
-#ifndef TARGET_SPARC64
- ncp_insn:
- gen_exception(dc, TT_NCP_INSN);
- return;
-#endif
+ return true;
+}
+
+static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
+ target_ulong new_mask, target_ulong old_mask)
+{
+ TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ tmp = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
+ tcg_gen_andi_tl(tmp, tmp, new_mask);
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
+ tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
+ gen_helper_set_fsr(tcg_env, cpu_fsr);
+ return advance_pc(dc);
+}
+
+TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
+TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
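
The masked merge in do_ldfsr, written out as plain C, preserves every FSR field outside new_mask and replaces the rest from memory:

    target_ulong merge_fsr(target_ulong fsr, target_ulong mem,
                           target_ulong new_mask, target_ulong old_mask)
    {
        return (fsr & old_mask) | (mem & new_mask);
    }

LDFSR only replaces the 32-bit fields; LDXFSR (64-bit only) covers the wider set, hence the two mask pairs.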
+
+static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
+{
+ TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
+ if (addr == NULL) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
+ return advance_pc(dc);
}
+TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
+TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
+
+static bool do_fc(DisasContext *dc, int rd, bool c)
+{
+ uint64_t mask;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ if (rd & 1) {
+ mask = MAKE_64BIT_MASK(0, 32);
+ } else {
+ mask = MAKE_64BIT_MASK(32, 32);
+ }
+ if (c) {
+ tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
+ } else {
+ tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
+ }
+ gen_update_fprs_dirty(dc, rd);
+ return advance_pc(dc);
+}
+
+TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
+TRANS(FONEs, VIS1, do_fc, a->rd, 1)
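
The mask selection above reflects how single-precision registers are packed two to a TCGv_i64:

    /*
     * cpu_fpr[i] bits 63:32 = %f(2*i)      (even register)
     * cpu_fpr[i] bits 31:0  = %f(2*i + 1)  (odd register)
     * so FZEROs/FONEs touch only the selected 32-bit half.
     */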
+
+static bool do_dc(DisasContext *dc, int rd, int64_t c)
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
+ gen_update_fprs_dirty(dc, rd);
+ return advance_pc(dc);
+}
+
+TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
+TRANS(FONEd, VIS1, do_dc, a->rd, -1)
+
+static bool do_ff(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ tmp = gen_load_fpr_F(dc, a->rs);
+ func(tmp, tmp);
+ gen_store_fpr_F(dc, a->rd, tmp);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
+TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
+TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
+TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
+TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
+
+static bool do_fd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_F(dc);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, src);
+ gen_store_fpr_F(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
+TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
+
+static bool do_env_ff(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ tmp = gen_load_fpr_F(dc, a->rs);
+ func(tmp, tcg_env, tmp);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, tmp);
+ return advance_pc(dc);
+}
+
+TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
+TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
+TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
+
+static bool do_env_fd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_F(dc);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
+TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
+TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
+
+static bool do_dd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, src);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
+TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
+TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
+TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
+TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
+
+static bool do_env_dd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src = gen_load_fpr_D(dc, a->rs);
+ func(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
+TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
+TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
+
+static bool do_env_df(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src = gen_load_fpr_F(dc, a->rs);
+ func(dst, tcg_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
+TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
+TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
+
+static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
+{
+ int rd, rs;
+
+ if (!avail_64(dc)) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ rd = QFPREG(a->rd);
+ rs = QFPREG(a->rs);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
+ gen_update_fprs_dirty(dc, rd);
+ return advance_pc(dc);
+}
+
+static bool do_qq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env))
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ func(tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
+TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
+
+static bool do_env_qq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env))
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ func(tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
+
+static bool do_env_fq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env))
+{
+ TCGv_i32 dst;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ dst = gen_dest_fpr_F(dc);
+ func(dst, tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
+TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
+
+static bool do_env_dq(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env))
+{
+ TCGv_i64 dst;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT1(QFPREG(a->rs));
+ dst = gen_dest_fpr_D(dc, a->rd);
+ func(dst, tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
+TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
+
+static bool do_env_qf(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env, TCGv_i32))
+{
+ TCGv_i32 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src = gen_load_fpr_F(dc, a->rs);
+ func(tcg_env, src);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
+TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
+
+static bool do_env_qd(DisasContext *dc, arg_r_r *a,
+ void (*func)(TCGv_env, TCGv_i64))
+{
+ TCGv_i64 src;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src = gen_load_fpr_D(dc, a->rs);
+ func(tcg_env, src);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
+TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
+
+static bool do_fff(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ func(src1, src1, src2);
+ gen_store_fpr_F(dc, a->rd, src1);
+ return advance_pc(dc);
+}
+
+TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
+TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
+TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
+TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
+TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
+TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
+TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
+TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
+TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
+TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
+TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
+TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
+
+static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ func(src1, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_F(dc, a->rd, src1);
+ return advance_pc(dc);
+}
+
+TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
+TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
+TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
+TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
+
+static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, src1, src2);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
+TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
+TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
+TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
+TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
+TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
+TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
+TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
+TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)
+
+TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
+TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
+TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
+TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
+TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
+TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
+TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
+TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
+TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
+TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
+TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
+TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
+
+TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
+TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
+TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
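
FALIGNDATAg and BSHUFFLE consume the GSR state set up by alignaddr/bmask. As a sketch of the faligndata data path, with align = GSR.align in 0..7 (the align == 0 case needs special handling, since a shift by 64 is undefined in C):

    /* bytes align .. align+7 of the 16-byte concatenation rs1:rs2 */
    uint64_t r = (s1 << (8 * align)) | (s2 >> (8 * (8 - align)));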
+
+static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 src1, src2;
+ TCGv dst;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_gpr(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, src1, src2);
+ gen_store_gpr(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
+TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
+TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
+TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
+
+TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
+TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
+TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
+TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
+
+static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
+TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
+TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
+TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
+
+static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
+{
+ TCGv_i64 dst;
+ TCGv_i32 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
+ return raise_unimpfpop(dc);
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ gen_helper_fsmuld(dst, tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src0, src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ dst = gen_dest_fpr_D(dc, a->rd);
+ src0 = gen_load_fpr_D(dc, a->rd);
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ func(dst, src0, src1, src2);
+ gen_store_fpr_D(dc, a->rd, dst);
+ return advance_pc(dc);
+}
+
+TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
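
PDIST accumulates into rd, which is why do_dddd reloads a->rd as src0 and passes it through. A reference version of what the helper computes, the sum of absolute differences over eight byte lanes:

    uint64_t pdist_ref(uint64_t sum, uint64_t s1, uint64_t s2)
    {
        for (int i = 0; i < 64; i += 8) {
            int d = (int)((s1 >> i) & 0xff) - (int)((s2 >> i) & 0xff);
            sum += d < 0 ? -d : d;
        }
        return sum;
    }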
+
+static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
+ void (*func)(TCGv_env))
+{
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT0(QFPREG(a->rs1));
+ gen_op_load_fpr_QT1(QFPREG(a->rs2));
+ func(tcg_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
+TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
+TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
+TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
+
+static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
+{
+ TCGv_i64 src1, src2;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ gen_helper_fdmulq(tcg_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
+ gen_op_store_QT0_fpr(QFPREG(a->rd));
+ gen_update_fprs_dirty(dc, QFPREG(a->rd));
+ return advance_pc(dc);
+}
+
+static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
+ void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+ DisasCompare cmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (is_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
+ func(dc, &cmp, a->rd, a->rs2);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
+TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
+TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
+
+static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
+ void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+ DisasCompare cmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (is_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_compare(&cmp, a->cc, a->cond, dc);
+ func(dc, &cmp, a->rd, a->rs2);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
+TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
+TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
+
+static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
+ void (*func)(DisasContext *, DisasCompare *, int, int))
+{
+ DisasCompare cmp;
+
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (is_128 && gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_fcompare(&cmp, a->cc, a->cond);
+ func(dc, &cmp, a->rd, a->rs2);
+ return advance_pc(dc);
+}
+
+TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
+TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
+TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
+
+static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
+{
+ TCGv_i32 src1, src2;
+
+ if (avail_32(dc) && a->cc != 0) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_F(dc, a->rs1);
+ src2 = gen_load_fpr_F(dc, a->rs2);
+ if (e) {
+ gen_op_fcmpes(a->cc, src1, src2);
+ } else {
+ gen_op_fcmps(a->cc, src1, src2);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(FCMPs, ALL, do_fcmps, a, false)
+TRANS(FCMPEs, ALL, do_fcmps, a, true)
+
+static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
+{
+ TCGv_i64 src1, src2;
+
+ if (avail_32(dc) && a->cc != 0) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ src1 = gen_load_fpr_D(dc, a->rs1);
+ src2 = gen_load_fpr_D(dc, a->rs2);
+ if (e) {
+ gen_op_fcmped(a->cc, src1, src2);
+ } else {
+ gen_op_fcmpd(a->cc, src1, src2);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(FCMPd, ALL, do_fcmpd, a, false)
+TRANS(FCMPEd, ALL, do_fcmpd, a, true)
+
+static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
+{
+ if (avail_32(dc) && a->cc != 0) {
+ return false;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ return true;
+ }
+ if (gen_trap_float128(dc)) {
+ return true;
+ }
+
+ gen_op_clear_ieee_excp_and_FTT();
+ gen_op_load_fpr_QT0(QFPREG(a->rs1));
+ gen_op_load_fpr_QT1(QFPREG(a->rs2));
+ if (e) {
+ gen_op_fcmpeq(a->cc);
+ } else {
+ gen_op_fcmpq(a->cc);
+ }
+ return advance_pc(dc);
+}
+
+TRANS(FCMPq, ALL, do_fcmpq, a, false)
+TRANS(FCMPEq, ALL, do_fcmpq, a, true)
+
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
@@ -5630,7 +5379,10 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
insn = translator_ldl(env, &dc->base, dc->pc);
dc->base.pc_next += 4;
- disas_sparc_insn(dc, insn);
+
+ if (!decode(dc, insn)) {
+ gen_exception(dc, TT_ILL_INSN);
+ }
if (dc->base.is_jmp == DISAS_NORETURN) {
return;
@@ -5643,6 +5395,7 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
+ DisasDelayException *e, *e_next;
bool may_lookup;
switch (dc->base.is_jmp) {
@@ -5654,10 +5407,10 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
break;
}
+ may_lookup = true;
if (dc->pc & 3) {
switch (dc->pc) {
case DYNAMIC_PC_LOOKUP:
- may_lookup = true;
break;
case DYNAMIC_PC:
may_lookup = false;
@@ -5667,10 +5420,24 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
}
} else {
tcg_gen_movi_tl(cpu_pc, dc->pc);
- may_lookup = true;
}
- save_npc(dc);
+ if (dc->npc & 3) {
+ switch (dc->npc) {
+ case JUMP_PC:
+ gen_generic_branch(dc);
+ break;
+ case DYNAMIC_PC:
+ may_lookup = false;
+ break;
+ case DYNAMIC_PC_LOOKUP:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ tcg_gen_movi_tl(cpu_npc, dc->npc);
+ }
if (may_lookup) {
tcg_gen_lookup_and_goto_ptr();
} else {
@@ -5690,6 +5457,19 @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
default:
g_assert_not_reached();
}
+
+ for (e = dc->delay_excp_list; e ; e = e_next) {
+ gen_set_label(e->lab);
+
+ tcg_gen_movi_tl(cpu_pc, e->pc);
+ if (e->npc % 4 == 0) {
+ tcg_gen_movi_tl(cpu_npc, e->npc);
+ }
+ gen_helper_raise_exception(tcg_env, e->excp);
+
+ e_next = e->next;
+ g_free(e);
+ }
}
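
The list nodes walked above are defined earlier in the file; their assumed shape (field names inferred from the uses visible in this hunk) is roughly:

    typedef struct DisasDelayException {
        struct DisasDelayException *next;
        TCGLabel *lab;
        TCGv_i32 excp;
        /* Saved pc/npc at the insn that queued the exception. */
        target_ulong pc;
        target_ulong npc;
    } DisasDelayException;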
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
@@ -5735,8 +5515,6 @@ void sparc_tcg_init(void)
#ifdef TARGET_SPARC64
{ &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
{ &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
-#else
- { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
{ &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
{ &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
@@ -5745,15 +5523,6 @@ void sparc_tcg_init(void)
static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
{ &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
- { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
- { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
- { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
- "hstick_cmpr" },
- { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
- { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
- { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
- { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
- { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
{ &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
{ &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
@@ -5763,9 +5532,7 @@ void sparc_tcg_init(void)
{ &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
{ &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
{ &cpu_y, offsetof(CPUSPARCState, y), "y" },
-#ifndef CONFIG_USER_ONLY
{ &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
-#endif
};
unsigned int i;
diff --git a/target/sparc/vis_helper.c b/target/sparc/vis_helper.c
index 3afdc6975c..7763b16c24 100644
--- a/target/sparc/vis_helper.c
+++ b/target/sparc/vis_helper.c
@@ -275,65 +275,6 @@ uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
return d.ll;
}
-#define VIS_HELPER(name, F) \
- uint64_t name##16(uint64_t src1, uint64_t src2) \
- { \
- VIS64 s, d; \
- \
- s.ll = src1; \
- d.ll = src2; \
- \
- d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
- d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
- d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
- d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
- \
- return d.ll; \
- } \
- \
- uint32_t name##16s(uint32_t src1, uint32_t src2) \
- { \
- VIS32 s, d; \
- \
- s.l = src1; \
- d.l = src2; \
- \
- d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
- d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
- \
- return d.l; \
- } \
- \
- uint64_t name##32(uint64_t src1, uint64_t src2) \
- { \
- VIS64 s, d; \
- \
- s.ll = src1; \
- d.ll = src2; \
- \
- d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
- d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
- \
- return d.ll; \
- } \
- \
- uint32_t name##32s(uint32_t src1, uint32_t src2) \
- { \
- VIS32 s, d; \
- \
- s.l = src1; \
- d.l = src2; \
- \
- d.l = F(d.l, s.l); \
- \
- return d.l; \
- }
-
-#define FADD(a, b) ((a) + (b))
-#define FSUB(a, b) ((a) - (b))
-VIS_HELPER(helper_fpadd, FADD)
-VIS_HELPER(helper_fpsub, FSUB)
-
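
The removed VIS_HELPER expansions are now emitted inline by the translator using TCG's partitioned integer ops, as seen in the FPADD16/FPSUB32 TRANS lines earlier in this patch, e.g.:

    tcg_gen_vec_add16_i64(dst, src1, src2);   /* four 16-bit lanes per i64 */
    tcg_gen_vec_sub32_i64(dst, src1, src2);   /* two 32-bit lanes per i64 */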
#define VIS_CMPHELPER(name, F) \
uint64_t name##16(uint64_t src1, uint64_t src2) \
{ \
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index dd812ec0f0..66553d1be0 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -6542,28 +6542,16 @@ static void decode_rrpw_extract_insert(DisasContext *ctx)
switch (op2) {
case OPC2_32_RRPW_EXTR:
if (width == 0) {
- tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
- break;
- }
-
- if (pos + width <= 32) {
- /* optimize special cases */
- if ((pos == 0) && (width == 8)) {
- tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
- } else if ((pos == 0) && (width == 16)) {
- tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
- } else {
- tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
- tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
- }
+ tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ } else if (pos + width <= 32) {
+ tcg_gen_sextract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
}
break;
case OPC2_32_RRPW_EXTR_U:
if (width == 0) {
tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
} else {
- tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
- tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
+ tcg_gen_extract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width);
}
break;
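
Both replacements are exact: for pos + width <= 32,

    /* sextract: d = (int32_t)(s << (32 - pos - width)) >> (32 - width); */
    /* extract:  d = (s >> pos) & (~0u >> (32 - width));                 */

which is what the removed shift pairs computed, including the pos == 0 special cases.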
case OPC2_32_RRPW_IMASK:
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 54bee7ddba..de89940599 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -2262,17 +2262,7 @@ static void translate_salt(DisasContext *dc, const OpcodeArg arg[],
static void translate_sext(DisasContext *dc, const OpcodeArg arg[],
const uint32_t par[])
{
- int shift = 31 - arg[2].imm;
-
- if (shift == 24) {
- tcg_gen_ext8s_i32(arg[0].out, arg[1].in);
- } else if (shift == 16) {
- tcg_gen_ext16s_i32(arg[0].out, arg[1].in);
- } else {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_shli_i32(tmp, arg[1].in, shift);
- tcg_gen_sari_i32(arg[0].out, tmp, shift);
- }
+ tcg_gen_sextract_i32(arg[0].out, arg[1].in, 0, arg[2].imm + 1);
}
static uint32_t test_exceptions_simcall(DisasContext *dc,
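In the xtensa SEXT rewrite just above, arg[2].imm names the sign-bit position, so the field is imm + 1 bits wide starting at bit 0; the old shift pair (and its ext8s/ext16s shortcuts for shift == 24 and 16) becomes one sextract. A sketch of the removed sequence for reference:

    /* shift == 24 corresponds to imm == 7, i.e. (int8_t) truncation;
     * shift == 16 to imm == 15, i.e. (int16_t). */
    static int32_t xtensa_sext_sketch(uint32_t x, int imm)
    {
        int shift = 31 - imm;
        return (int32_t)(x << shift) >> shift;
    }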
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 3afb896a3a..a3efa1e67a 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -77,9 +77,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_REG_TMP2 TCG_REG_X30
#define TCG_VEC_TMP0 TCG_REG_V31
-#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE TCG_REG_X28
-#endif
static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
@@ -1664,97 +1662,98 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;
-#ifdef CONFIG_SOFTMMU
- unsigned s_mask = (1u << s_bits) - 1;
- unsigned mem_index = get_mmuidx(oi);
- TCGReg addr_adj;
- TCGType mask_type;
- uint64_t compare_mask;
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
-
- mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
- ? TCG_TYPE_I64 : TCG_TYPE_I32);
-
- /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
- tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
- tlb_mask_table_ofs(s, mem_index), 1, 0);
-
- /* Extract the TLB index from the address into X0. */
- tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
- TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS);
-
- /* Add the tlb_table pointer, forming the CPUTLBEntry address in TMP1. */
- tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
-
- /* Load the tlb comparator into TMP0, and the fast path addend into TMP1. */
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
- tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1,
- is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write));
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
- offsetof(CPUTLBEntry, addend));
+ if (tcg_use_softmmu) {
+ unsigned s_mask = (1u << s_bits) - 1;
+ unsigned mem_index = get_mmuidx(oi);
+ TCGReg addr_adj;
+ TCGType mask_type;
+ uint64_t compare_mask;
- /*
- * For aligned accesses, we check the first byte and include the alignment
- * bits within the address. For unaligned access, we check that we don't
- * cross pages using the address of the last byte of the access.
- */
- if (a_mask >= s_mask) {
- addr_adj = addr_reg;
- } else {
- addr_adj = TCG_REG_TMP2;
- tcg_out_insn(s, 3401, ADDI, addr_type,
- addr_adj, addr_reg, s_mask - a_mask);
- }
- compare_mask = (uint64_t)s->page_mask | a_mask;
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
- /* Store the page mask part of the address into TMP2. */
- tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
- addr_adj, compare_mask);
+ mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
+ ? TCG_TYPE_I64 : TCG_TYPE_I32);
- /* Perform the address comparison. */
- tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
+ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
+ tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
+ tlb_mask_table_ofs(s, mem_index), 1, 0);
- /* If not equal, we jump to the slow path. */
- ldst->label_ptr[0] = s->code_ptr;
- tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
+ /* Extract the TLB index from the address into X0. */
+ tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
+ TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
- h->base = TCG_REG_TMP1;
- h->index = addr_reg;
- h->index_ext = addr_type;
-#else
- if (a_mask) {
- ldst = new_ldst_label(s);
+ /* Add the tlb_table pointer, forming the CPUTLBEntry address. */
+ tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ /* Load the tlb comparator into TMP0, and the fast path addend. */
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1,
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+ offsetof(CPUTLBEntry, addend));
+
+ /*
+ * For aligned accesses, we check the first byte and include
+ * the alignment bits within the address. For unaligned access,
+ * we check that we don't cross pages using the address of the
+ * last byte of the access.
+ */
+ if (a_mask >= s_mask) {
+ addr_adj = addr_reg;
+ } else {
+ addr_adj = TCG_REG_TMP2;
+ tcg_out_insn(s, 3401, ADDI, addr_type,
+ addr_adj, addr_reg, s_mask - a_mask);
+ }
+ compare_mask = (uint64_t)s->page_mask | a_mask;
+
+ /* Store the page mask part of the address into TMP2. */
+ tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
+ addr_adj, compare_mask);
- /* tst addr, #mask */
- tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
+ /* Perform the address comparison. */
+ tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
- /* b.ne slow_path */
+ /* If not equal, we jump to the slow path. */
ldst->label_ptr[0] = s->code_ptr;
tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
- }
- if (guest_base || addr_type == TCG_TYPE_I32) {
- h->base = TCG_REG_GUEST_BASE;
+ h->base = TCG_REG_TMP1;
h->index = addr_reg;
h->index_ext = addr_type;
} else {
- h->base = addr_reg;
- h->index = TCG_REG_XZR;
- h->index_ext = TCG_TYPE_I64;
+ if (a_mask) {
+ ldst = new_ldst_label(s);
+
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+
+ /* tst addr, #mask */
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
+
+ /* b.ne slow_path */
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
+ }
+
+ if (guest_base || addr_type == TCG_TYPE_I32) {
+ h->base = TCG_REG_GUEST_BASE;
+ h->index = addr_reg;
+ h->index_ext = addr_type;
+ } else {
+ h->base = addr_reg;
+ h->index = TCG_REG_XZR;
+ h->index_ext = TCG_TYPE_I64;
+ }
}
-#endif
return ldst;
}
@@ -3117,16 +3116,16 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
CPU_TEMP_BUF_NLONGS * sizeof(long));
-#if !defined(CONFIG_SOFTMMU)
- /*
- * Note that XZR cannot be encoded in the address base register slot,
- * as that actually encodes SP. Depending on the guest, we may need
- * to zero-extend the guest address via the address index register slot,
- * therefore we need to load even a zero guest base into a register.
- */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
-#endif
+ if (!tcg_use_softmmu) {
+ /*
+ * Note that XZR cannot be encoded in the address base register slot,
+ * as that actually encodes SP. Depending on the guest, we may need
+ * to zero-extend the guest address via the address index register slot,
+ * therefore we need to load even a zero guest base into a register.
+ */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
+ }
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);
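This aarch64 hunk sets the pattern repeated in the arm, i386, loongarch64, mips and ppc hunks below: every #ifdef CONFIG_SOFTMMU / #else / #endif around prepare_host_addr() and the prologue becomes a runtime if (tcg_use_softmmu), so one backend binary can serve both configurations. One plausible arrangement, with hypothetical names, showing why user-only builds lose nothing (the flag folds to a constant and the dead arm is eliminated just as the preprocessor did):

    #ifdef CONFIG_USER_ONLY
    #define use_softmmu_sketch false
    #else
    extern bool use_softmmu_sketch;
    #endif

    static void prepare_addr_sketch(void)
    {
        if (use_softmmu_sketch) {
            /* Emit the TLB lookup: load mask/table, index the entry,
             * compare, and branch to the slow path on a miss. */
        } else {
            /* Emit an optional alignment check, then address the
             * guest page directly relative to guest_base. */
        }
    }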
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 0d9c2d157b..fc78566494 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -89,9 +89,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_REG_TMP TCG_REG_R12
#define TCG_VEC_TMP TCG_REG_Q15
-#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE TCG_REG_R11
-#endif
typedef enum {
COND_EQ = 0x0,
@@ -356,14 +354,8 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
* r0-r3 will be overwritten when reading the tlb entry (system-mode only);
* r14 will be overwritten by the BLNE branching to the slow path.
*/
-#ifdef CONFIG_SOFTMMU
#define ALL_QLDST_REGS \
- (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
- (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
- (1 << TCG_REG_R14)))
-#else
-#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
-#endif
+ (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))
/*
* ARM immediates for ALU instructions are made of an unsigned 8-bit
@@ -1387,113 +1379,115 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
MemOp opc = get_memop(oi);
unsigned a_mask;
-#ifdef CONFIG_SOFTMMU
- *h = (HostAddress){
- .cond = COND_AL,
- .base = addrlo,
- .index = TCG_REG_R1,
- .index_scratch = true,
- };
-#else
- *h = (HostAddress){
- .cond = COND_AL,
- .base = addrlo,
- .index = guest_base ? TCG_REG_GUEST_BASE : -1,
- .index_scratch = false,
- };
-#endif
+ if (tcg_use_softmmu) {
+ *h = (HostAddress){
+ .cond = COND_AL,
+ .base = addrlo,
+ .index = TCG_REG_R1,
+ .index_scratch = true,
+ };
+ } else {
+ *h = (HostAddress){
+ .cond = COND_AL,
+ .base = addrlo,
+ .index = guest_base ? TCG_REG_GUEST_BASE : -1,
+ .index_scratch = false,
+ };
+ }
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
a_mask = (1 << h->aa.align) - 1;
-#ifdef CONFIG_SOFTMMU
- int mem_index = get_mmuidx(oi);
- int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write);
- int fast_off = tlb_mask_table_ofs(s, mem_index);
- unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
- TCGReg t_addr;
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
-
- /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
-
- /* Extract the tlb index from the address into R0. */
- tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
- SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
+ if (tcg_use_softmmu) {
+ int mem_index = get_mmuidx(oi);
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
+ unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
+ TCGReg t_addr;
- /*
- * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
- * Load the tlb comparator into R2/R3 and the fast path addend into R1.
- */
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
- if (cmp_off == 0) {
- if (s->addr_type == TCG_TYPE_I32) {
- tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
- } else {
- tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
- }
- } else {
- tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
- TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
- if (s->addr_type == TCG_TYPE_I32) {
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;
+
+ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
+ tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
+
+ /* Extract the tlb index from the address into R0. */
+ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
+ SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
+
+ /*
+ * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
+ * Load the tlb comparator into R2/R3 and the fast path addend into R1.
+ */
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+ if (cmp_off == 0) {
+ if (s->addr_type == TCG_TYPE_I32) {
+ tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
+ TCG_REG_R1, TCG_REG_R0);
+ } else {
+ tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
+ TCG_REG_R1, TCG_REG_R0);
+ }
} else {
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
+ TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
+ if (s->addr_type == TCG_TYPE_I32) {
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+ } else {
+ tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
+ }
}
- }
- /* Load the tlb addend. */
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
- offsetof(CPUTLBEntry, addend));
+ /* Load the tlb addend. */
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
+ offsetof(CPUTLBEntry, addend));
- /*
- * Check alignment, check comparators.
- * Do this in 2-4 insns. Use MOVW for v7, if possible,
- * to reduce the number of sequential conditional instructions.
- * Almost all guests have at least 4k pages, which means that we need
- * to clear at least 9 bits even for an 8-byte memory, which means it
- * isn't worth checking for an immediate operand for BIC.
- *
- * For unaligned accesses, test the page of the last unit of alignment.
- * This leaves the least significant alignment bits unchanged, and of
- * course must be zero.
- */
- t_addr = addrlo;
- if (a_mask < s_mask) {
- t_addr = TCG_REG_R0;
- tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
- addrlo, s_mask - a_mask);
- }
- if (use_armv7_instructions && s->page_bits <= 16) {
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
- tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
- t_addr, TCG_REG_TMP, 0);
- tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
- } else {
- if (a_mask) {
- tcg_debug_assert(a_mask <= 0xff);
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+ /*
+ * Check alignment, check comparators.
+ * Do this in 2-4 insns. Use MOVW for v7, if possible,
+ * to reduce the number of sequential conditional instructions.
+ * Almost all guests have at least 4k pages, which means that we need
+ * to clear at least 9 bits even for an 8-byte memory, which means it
+ * isn't worth checking for an immediate operand for BIC.
+ *
+ * For unaligned accesses, test the page of the last unit of alignment.
+ * This leaves the least significant alignment bits unchanged, and of
+ * course must be zero.
+ */
+ t_addr = addrlo;
+ if (a_mask < s_mask) {
+ t_addr = TCG_REG_R0;
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
+ addrlo, s_mask - a_mask);
+ }
+ if (use_armv7_instructions && s->page_bits <= 16) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
+ tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
+ t_addr, TCG_REG_TMP, 0);
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
+ TCG_REG_R2, TCG_REG_TMP, 0);
+ } else {
+ if (a_mask) {
+ tcg_debug_assert(a_mask <= 0xff);
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+ }
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
+ SHIFT_IMM_LSR(s->page_bits));
+ tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
+ 0, TCG_REG_R2, TCG_REG_TMP,
+ SHIFT_IMM_LSL(s->page_bits));
}
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
- SHIFT_IMM_LSR(s->page_bits));
- tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
- 0, TCG_REG_R2, TCG_REG_TMP,
- SHIFT_IMM_LSL(s->page_bits));
- }
- if (s->addr_type != TCG_TYPE_I32) {
- tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
- }
-#else
- if (a_mask) {
+ if (s->addr_type != TCG_TYPE_I32) {
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
+ }
+ } else if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
@@ -1505,7 +1499,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* tst addr, #mask */
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
}
-#endif
return ldst;
}
@@ -2931,12 +2924,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
-#ifndef CONFIG_SOFTMMU
- if (guest_base) {
+ if (!tcg_use_softmmu && guest_base) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
}
-#endif
tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
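In the ALL_QLDST_REGS change above, 0xf is simply the bitmask of r0-r3 (ARM register numbers 0 through 3), the registers clobbered by the TLB lookup; folding it behind tcg_use_softmmu merges the former two-macro #ifdef into one expression. A compile-time sanity sketch:

    /* 0xf == the r0-r3 clobber mask from the deleted softmmu branch. */
    _Static_assert(((1u << 0) | (1u << 1) | (1u << 2) | (1u << 3)) == 0xf,
                   "r0-r3 mask");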
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 788d608150..a83f8aab30 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -153,11 +153,8 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
# define ALL_VECTOR_REGS 0x00ff0000u
# define ALL_BYTEL_REGS 0x0000000fu
#endif
-#ifdef CONFIG_SOFTMMU
-# define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
-#else
-# define SOFTMMU_RESERVE_REGS 0
-#endif
+#define SOFTMMU_RESERVE_REGS \
+ (tcg_use_softmmu ? (1 << TCG_REG_L0) | (1 << TCG_REG_L1) : 0)
/* For 64-bit, we always know that CMOV is available. */
#if TCG_TARGET_REG_BITS == 64
@@ -1933,7 +1930,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
return true;
}
-#ifndef CONFIG_SOFTMMU
+#ifdef CONFIG_USER_ONLY
static HostAddress x86_guest_base = {
.index = -1
};
@@ -1949,6 +1946,7 @@ static inline int setup_guest_base_seg(void)
}
return 0;
}
+#define setup_guest_base_seg setup_guest_base_seg
#elif defined(__x86_64__) && \
(defined (__FreeBSD__) || defined (__FreeBSD_kernel__))
# include <machine/sysarch.h>
@@ -1959,13 +1957,14 @@ static inline int setup_guest_base_seg(void)
}
return 0;
}
+#define setup_guest_base_seg setup_guest_base_seg
+#endif
#else
-static inline int setup_guest_base_seg(void)
-{
- return 0;
-}
-#endif /* setup_guest_base_seg */
-#endif /* !SOFTMMU */
+# define x86_guest_base (*(HostAddress *)({ qemu_build_not_reached(); NULL; }))
+#endif /* CONFIG_USER_ONLY */
+#ifndef setup_guest_base_seg
+# define setup_guest_base_seg() 0
+#endif
#define MIN_TLB_MASK_TABLE_OFS INT_MIN
@@ -1984,94 +1983,94 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
MemOp s_bits = opc & MO_SIZE;
unsigned a_mask;
-#ifdef CONFIG_SOFTMMU
- h->index = TCG_REG_L0;
- h->ofs = 0;
- h->seg = 0;
-#else
- *h = x86_guest_base;
-#endif
+ if (tcg_use_softmmu) {
+ h->index = TCG_REG_L0;
+ h->ofs = 0;
+ h->seg = 0;
+ } else {
+ *h = x86_guest_base;
+ }
h->base = addrlo;
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;
-#ifdef CONFIG_SOFTMMU
- int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write);
- TCGType ttype = TCG_TYPE_I32;
- TCGType tlbtype = TCG_TYPE_I32;
- int trexw = 0, hrexw = 0, tlbrexw = 0;
- unsigned mem_index = get_mmuidx(oi);
- unsigned s_mask = (1 << s_bits) - 1;
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
- int tlb_mask;
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ if (tcg_use_softmmu) {
+ int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
+ TCGType ttype = TCG_TYPE_I32;
+ TCGType tlbtype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0, tlbrexw = 0;
+ unsigned mem_index = get_mmuidx(oi);
+ unsigned s_mask = (1 << s_bits) - 1;
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+ int tlb_mask;
- if (TCG_TARGET_REG_BITS == 64) {
- ttype = s->addr_type;
- trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
- if (TCG_TYPE_PTR == TCG_TYPE_I64) {
- hrexw = P_REXW;
- if (s->page_bits + s->tlb_dyn_max_bits > 32) {
- tlbtype = TCG_TYPE_I64;
- tlbrexw = P_REXW;
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;
+
+ if (TCG_TARGET_REG_BITS == 64) {
+ ttype = s->addr_type;
+ trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
+ if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+ hrexw = P_REXW;
+ if (s->page_bits + s->tlb_dyn_max_bits > 32) {
+ tlbtype = TCG_TYPE_I64;
+ tlbrexw = P_REXW;
+ }
}
}
- }
-
- tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
- tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
- fast_ofs + offsetof(CPUTLBDescFast, mask));
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+ tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
- tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
- fast_ofs + offsetof(CPUTLBDescFast, table));
+ tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
+ fast_ofs + offsetof(CPUTLBDescFast, mask));
- /*
- * If the required alignment is at least as large as the access, simply
- * copy the address and mask. For lesser alignments, check that we don't
- * cross pages for the complete access.
- */
- if (a_mask >= s_mask) {
- tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
- } else {
- tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
- addrlo, s_mask - a_mask);
- }
- tlb_mask = s->page_mask | a_mask;
- tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
-
- /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
- TCG_REG_L1, TCG_REG_L0, cmp_ofs);
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
+ fast_ofs + offsetof(CPUTLBDescFast, table));
- /* jne slow_path */
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[0] = s->code_ptr;
- s->code_ptr += 4;
+ /*
+ * If the required alignment is at least as large as the access,
+ * simply copy the address and mask. For lesser alignments,
+ * check that we don't cross pages for the complete access.
+ */
+ if (a_mask >= s_mask) {
+ tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
+ } else {
+ tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
+ addrlo, s_mask - a_mask);
+ }
+ tlb_mask = s->page_mask | a_mask;
+ tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
- /* cmp 4(TCG_REG_L0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, cmp_ofs + 4);
+ /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
+ TCG_REG_L1, TCG_REG_L0, cmp_ofs);
/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[1] = s->code_ptr;
+ ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
- }
- /* TLB Hit. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
- offsetof(CPUTLBEntry, addend));
-#else
- if (a_mask) {
+ if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
+ /* cmp 4(TCG_REG_L0), addrhi */
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi,
+ TCG_REG_L0, cmp_ofs + 4);
+
+ /* jne slow_path */
+ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+ ldst->label_ptr[1] = s->code_ptr;
+ s->code_ptr += 4;
+ }
+
+ /* TLB Hit. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
+ offsetof(CPUTLBEntry, addend));
+ } else if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
@@ -2085,7 +2084,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
}
-#endif
return ldst;
}
@@ -4140,35 +4138,35 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_push(s, tcg_target_callee_save_regs[i]);
}
-#if TCG_TARGET_REG_BITS == 32
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
- (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- /* jmp *tb. */
- tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
- (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
- + stack_addend);
-#else
-# if !defined(CONFIG_SOFTMMU)
- if (guest_base) {
+ if (!tcg_use_softmmu && guest_base) {
int seg = setup_guest_base_seg();
if (seg != 0) {
x86_guest_base.seg = seg;
} else if (guest_base == (int32_t)guest_base) {
x86_guest_base.ofs = guest_base;
} else {
+ assert(TCG_TARGET_REG_BITS == 64);
/* Choose R12 because, as a base, it requires a SIB byte. */
x86_guest_base.index = TCG_REG_R12;
tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base);
tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index);
}
}
-# endif
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- /* jmp *tb. */
- tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
-#endif
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+ /* jmp *tb. */
+ tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
+ + stack_addend);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+ /* jmp *tb. */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
+ }
/*
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
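Two idioms appear in the i386 hunks above: x86_guest_base gains a qemu_build_not_reached() trap so that non-user builds cannot accidentally reference it, and each OS-specific setup_guest_base_seg() now also defines a same-named macro so a single trailing #ifndef supplies the zero default. A sketch of the macro pattern, with the syscall body elided (the real implementations issue an OS-specific call and return a segment prefix):

    #if defined(__x86_64__) && defined(__linux__)
    static inline int setup_guest_base_seg(void)
    {
        /* OS-specific segment setup elided; 0 means "no segment". */
        return 0;
    }
    #define setup_guest_base_seg setup_guest_base_seg
    #endif

    #ifndef setup_guest_base_seg
    # define setup_guest_base_seg() 0   /* fallback when nothing better exists */
    #endif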
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 801302d85d..ccf133db4b 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -165,10 +165,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
return TCG_REG_A0 + slot;
}
-#ifndef CONFIG_SOFTMMU
-#define USE_GUEST_BASE (guest_base != 0)
#define TCG_GUEST_BASE_REG TCG_REG_S1
-#endif
#define TCG_CT_CONST_ZERO 0x100
#define TCG_CT_CONST_S12 0x200
@@ -908,76 +905,77 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
a_bits = h->aa.align;
-#ifdef CONFIG_SOFTMMU
- unsigned s_bits = opc & MO_SIZE;
- int mem_index = get_mmuidx(oi);
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
- int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
- int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
-
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
-
- tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
- tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
-
- /* Load the tlb comparator and the addend. */
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
- tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
- is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write));
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
- offsetof(CPUTLBEntry, addend));
-
- /*
- * For aligned accesses, we check the first byte and include the alignment
- * bits within the address. For unaligned access, we check that we don't
- * cross pages using the address of the last byte of the access.
- */
- if (a_bits < s_bits) {
- unsigned a_mask = (1u << a_bits) - 1;
- unsigned s_mask = (1u << s_bits) - 1;
- tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
- } else {
- tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
- }
- tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
- a_bits, s->page_bits - 1);
-
- /* Compare masked address with the TLB entry. */
- ldst->label_ptr[0] = s->code_ptr;
- tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
+ if (tcg_use_softmmu) {
+ unsigned s_bits = opc & MO_SIZE;
+ int mem_index = get_mmuidx(oi);
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
- h->index = TCG_REG_TMP2;
-#else
- if (a_bits) {
ldst = new_ldst_label(s);
-
ldst->is_ld = is_ld;
ldst->oi = oi;
ldst->addrlo_reg = addr_reg;
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
+
+ tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+ tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+ /* Load the tlb comparator and the addend. */
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+ offsetof(CPUTLBEntry, addend));
+
/*
- * Without micro-architecture details, we don't know which of
- * bstrpick or andi is faster, so use bstrpick as it's not
- * constrained by imm field width. Not to say alignments >= 2^12
- * are going to happen any time soon.
+ * For aligned accesses, we check the first byte and include the
+ * alignment bits within the address. For unaligned access, we
+ * check that we don't cross pages using the address of the last
+ * byte of the access.
*/
- tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
+ if (a_bits < s_bits) {
+ unsigned a_mask = (1u << a_bits) - 1;
+ unsigned s_mask = (1u << s_bits) - 1;
+ tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
+ } else {
+ tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
+ }
+ tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
+ a_bits, s->page_bits - 1);
+ /* Compare masked address with the TLB entry. */
ldst->label_ptr[0] = s->code_ptr;
- tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
- }
+ tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
- h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-#endif
+ h->index = TCG_REG_TMP2;
+ } else {
+ if (a_bits) {
+ ldst = new_ldst_label(s);
+
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+
+ /*
+ * Without micro-architecture details, we don't know which of
+ * bstrpick or andi is faster, so use bstrpick as it's not
+ * constrained by imm field width. Not to say alignments >= 2^12
+ * are going to happen any time soon.
+ */
+ tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
+
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+ }
+
+ h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+ }
if (addr_type == TCG_TYPE_I32) {
h->base = TCG_REG_TMP0;
@@ -2272,12 +2270,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
}
-#if !defined(CONFIG_SOFTMMU)
- if (USE_GUEST_BASE) {
+ if (!tcg_use_softmmu && guest_base) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
-#endif
/* Call generated code */
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
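The loongarch64 user-mode path above keeps the alignment test on bstrpick; per the LoongArch ISA, bstrpick.d rd, rj, msb, lsb extracts bits [msb:lsb] of rj, so with operands (a_bits - 1, 0) a nonzero result marks a misaligned address and triggers the slow-path branch. A sketch of the extraction, assuming those documented semantics:

    /* bstrpick.d rd, rj, msb, lsb: zero-extended bitfield extract. */
    static uint64_t bstrpick_d_sketch(uint64_t rj, int msb, int lsb)
    {
        return (rj << (63 - msb)) >> (63 - msb + lsb);
    }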
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index e2892edc6a..328984ccff 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -78,13 +78,11 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#define TCG_TMP2 TCG_REG_T8
#define TCG_TMP3 TCG_REG_T7
-#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_S7
-#endif
#if TCG_TARGET_REG_BITS == 64
#define TCG_REG_TB TCG_REG_S6
#else
-#define TCG_REG_TB (qemu_build_not_reached(), TCG_REG_ZERO)
+#define TCG_REG_TB ({ qemu_build_not_reached(); TCG_REG_ZERO; })
#endif
/* check if we really need so many registers :P */
@@ -1279,130 +1277,129 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
a_bits = h->aa.align;
a_mask = (1 << a_bits) - 1;
-#ifdef CONFIG_SOFTMMU
- unsigned s_mask = (1 << s_bits) - 1;
- int mem_index = get_mmuidx(oi);
- int fast_off = tlb_mask_table_ofs(s, mem_index);
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
- int add_off = offsetof(CPUTLBEntry, addend);
- int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write);
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
-
- /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
-
- /* Extract the TLB index from the address into TMP3. */
- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- } else {
- tcg_out_dsrl(s, TCG_TMP3, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- }
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
+ if (tcg_use_softmmu) {
+ unsigned s_mask = (1 << s_bits) - 1;
+ int mem_index = get_mmuidx(oi);
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
+ int add_off = offsetof(CPUTLBEntry, addend);
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
- /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
- tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;
- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- /* Load the (low half) tlb comparator. */
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
- cmp_off + HOST_BIG_ENDIAN * 4);
- } else {
- tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
- }
+ /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
- if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
- }
+ /* Extract the TLB index from the address into TMP3. */
+ if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
+ } else {
+ tcg_out_dsrl(s, TCG_TMP3, addrlo,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
+ }
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
+
+ /* Add the tlb_table pointer, creating the CPUTLBEntry address. */
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
- /*
- * Mask the page bits, keeping the alignment bits to compare against.
- * For unaligned accesses, compare against the end of the access to
- * verify that it does not cross a page boundary.
- */
- tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
- if (a_mask < s_mask) {
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask);
+ /* Load the (low half) tlb comparator. */
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
+ cmp_off + HOST_BIG_ENDIAN * 4);
} else {
- tcg_out_opc_imm(s, OPC_DADDIU, TCG_TMP2, addrlo, s_mask - a_mask);
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
}
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
- } else {
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
- }
- /* Zero extend a 32-bit guest address for a 64-bit host. */
- if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_TMP2, addrlo);
- addrlo = TCG_TMP2;
- }
+ if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
+ /* Load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
+ }
- ldst->label_ptr[0] = s->code_ptr;
- tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
+ /*
+ * Mask the page bits, keeping the alignment bits to compare against.
+ * For unaligned accesses, compare against the end of the access to
+ * verify that it does not cross a page boundary.
+ */
+ tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
+ if (a_mask < s_mask) {
+ tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32
+ || addr_type == TCG_TYPE_I32
+ ? OPC_ADDIU : OPC_DADDIU),
+ TCG_TMP2, addrlo, s_mask - a_mask);
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
+ } else {
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
+ }
- /* Load and test the high half tlb comparator. */
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
- /* delay slot */
- tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
+ /* Zero extend a 32-bit guest address for a 64-bit host. */
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
+ tcg_out_ext32u(s, TCG_TMP2, addrlo);
+ addrlo = TCG_TMP2;
+ }
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
- ldst->label_ptr[1] = s->code_ptr;
- tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
- }
+ /* Load and test the high half tlb comparator. */
+ if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
+ /* delay slot */
+ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
- /* delay slot */
- base = TCG_TMP3;
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
-#else
- if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
- ldst = new_ldst_label(s);
+ /* Load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->label_ptr[1] = s->code_ptr;
+ tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
+ }
- /* We are expecting a_bits to max out at 7, much lower than ANDI. */
- tcg_debug_assert(a_bits < 16);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
+ /* delay slot */
+ base = TCG_TMP3;
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
+ } else {
+ if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
+ ldst = new_ldst_label(s);
- ldst->label_ptr[0] = s->code_ptr;
- if (use_mips32r6_instructions) {
- tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
- } else {
- tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
- tcg_out_nop(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;
+
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
+ tcg_debug_assert(a_bits < 16);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
+
+ ldst->label_ptr[0] = s->code_ptr;
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
+ } else {
+ tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
+ tcg_out_nop(s);
+ }
}
- }
- base = addrlo;
- if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_REG_A0, base);
- base = TCG_REG_A0;
- }
- if (guest_base) {
- if (guest_base == (int16_t)guest_base) {
- tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
- } else {
- tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
- TCG_GUEST_BASE_REG);
+ base = addrlo;
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
+ tcg_out_ext32u(s, TCG_REG_A0, base);
+ base = TCG_REG_A0;
+ }
+ if (guest_base) {
+ if (guest_base == (int16_t)guest_base) {
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
+ } else {
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
+ TCG_GUEST_BASE_REG);
+ }
+ base = TCG_REG_A0;
}
- base = TCG_REG_A0;
}
-#endif
h->base = base;
return ldst;
@@ -2465,8 +2462,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
}
-#ifndef CONFIG_SOFTMMU
- if (guest_base != (int16_t)guest_base) {
+ if (!tcg_use_softmmu && guest_base != (int16_t)guest_base) {
/*
* The function call abi for n32 and n64 will have loaded $25 (t9)
* with the address of the prologue, so we can use that instead
@@ -2479,7 +2475,6 @@ static void tcg_target_qemu_prologue(TCGContext *s)
TCG_TARGET_REG_BITS == 64 ? TCG_REG_T9 : 0);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
-#endif
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
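The 32-bit TCG_REG_TB tweak at the top of the mips hunk swaps a comma expression for a GNU statement expression; either way the macro yields TCG_REG_ZERO while planting a build-time trap if a use survives optimization. A sketch of the trick behind qemu_build_not_reached(), with hypothetical names (the real helper is declared along these lines, but check its definition):

    /* If the compiler cannot prove this call dead, the missing
     * definition turns into a link failure -- a build-time assert. */
    extern void link_error_not_reached(void);

    #define reg_tb_sketch ({ link_error_not_reached(); 0 /* $zero */; })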
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 3013eb04e6..2db5177c32 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -688,12 +688,14 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
int i, nb_oargs;
/*
- * For an opcode that ends a BB, reset all temp data.
- * We do no cross-BB optimization.
+ * We only optimize extended basic blocks. If the opcode ends a BB
+ * and is not a conditional branch, reset all temp data.
*/
if (def->flags & TCG_OPF_BB_END) {
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
ctx->prev_mb = NULL;
+ if (!(def->flags & TCG_OPF_COND_BRANCH)) {
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ }
return;
}
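This is the substance of the optimize.c change: a conditional branch ends a basic block, but its fall-through edge continues the same extended basic block, so values recorded in temps_used stay valid there; only unconditional terminators wipe the table (memory-barrier tracking via prev_mb resets either way). An illustration of the redundancy this exposes, as pseudo-TCG in comments:

    /*
     *   movi_i32   t1, 0x100        -- t1 becomes a known constant
     *   brcond_i32 t0, t2, lt, L1   -- ends the BB, but conditionally
     *   add_i32    t3, t0, t1       -- on fall-through t1 is still
     *                                  known to be 0x100 and can fold
     */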
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 5c873b2161..856c3b18f5 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -83,7 +83,7 @@
#define TCG_VEC_TMP2 TCG_REG_V1
#define TCG_REG_TB TCG_REG_R31
-#define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
+#define USE_REG_TB (TCG_TARGET_REG_BITS == 64 && !have_isa_3_00)
/* Shorthand for size of a pointer. Avoid promotion to unsigned. */
#define SZP ((int)sizeof(void *))
@@ -101,11 +101,13 @@
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
+#ifndef R_PPC64_PCREL34
+#define R_PPC64_PCREL34 132
+#endif
+
#define have_isel (cpuinfo & CPUINFO_ISEL)
-#ifndef CONFIG_SOFTMMU
-#define TCG_GUEST_BASE_REG 30
-#endif
+#define TCG_GUEST_BASE_REG TCG_REG_R30
#ifdef CONFIG_DEBUG_TCG
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
@@ -215,13 +217,19 @@ static const int tcg_target_callee_save_regs[] = {
TCG_REG_R31
};
+/* For PPC, we use TB+4 instead of TB as the base. */
+static inline ptrdiff_t ppc_tbrel_diff(TCGContext *s, const void *target)
+{
+ return tcg_tbrel_diff(s, target) - 4;
+}
+
static inline bool in_range_b(tcg_target_long target)
{
return target == sextract64(target, 0, 26);
}
static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
- const tcg_insn_unit *target)
+ const tcg_insn_unit *target)
{
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
tcg_debug_assert(in_range_b(disp));
@@ -241,7 +249,7 @@ static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
}
static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
- const tcg_insn_unit *target)
+ const tcg_insn_unit *target)
{
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
tcg_debug_assert(disp == (int16_t) disp);
@@ -260,6 +268,19 @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
return false;
}
+static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
+{
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
+
+ if (disp == sextract64(disp, 0, 34)) {
+ src_rw[0] = (src_rw[0] & ~0x3ffff) | ((disp >> 16) & 0x3ffff);
+ src_rw[1] = (src_rw[1] & ~0xffff) | (disp & 0xffff);
+ return true;
+ }
+ return false;
+}
+
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
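reloc_pc34 above patches a prefixed (two-word) instruction pair: the displacement's high 18 bits go into the prefix word and the low 16 into the suffix, for the 34-bit PC-relative reach of the POWER10 forms used later in this patch. A sketch of the inverse, recovering the displacement from a patched pair:

    /* Reassemble the 34-bit signed displacement reloc_pc34 scattered
     * across the prefix/suffix words. */
    static int64_t pcrel34_disp(uint32_t prefix, uint32_t suffix)
    {
        uint64_t hi = prefix & 0x3ffff;          /* bits 33..16 */
        uint64_t lo = suffix & 0xffff;           /* bits 15..0  */
        uint64_t d  = (hi << 16) | lo;
        return (int64_t)(d << 30) >> 30;         /* sign-extend bit 33 */
    }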
@@ -323,6 +344,15 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
#define STDX XO31(149)
#define STQ XO62( 2)
+#define PLWA OPCD( 41)
+#define PLD OPCD( 57)
+#define PLXSD OPCD( 42)
+#define PLXV OPCD(25 * 2 + 1) /* force tx=1 */
+
+#define PSTD OPCD( 61)
+#define PSTXSD OPCD( 46)
+#define PSTXV OPCD(27 * 2 + 1) /* force sx=1 */
+
#define ADDIC OPCD( 12)
#define ADDI OPCD( 14)
#define ADDIS OPCD( 15)
@@ -356,6 +386,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
#define CRNAND XO19(225)
#define CROR XO19(449)
#define CRNOR XO19( 33)
+#define ADDPCIS XO19( 2)
#define EXTSB XO31(954)
#define EXTSH XO31(922)
@@ -680,6 +711,8 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
return reloc_pc14(code_ptr, target);
case R_PPC_REL24:
return reloc_pc24(code_ptr, target);
+ case R_PPC64_PCREL34:
+ return reloc_pc34(code_ptr, target);
case R_PPC_ADDR16:
/*
* We are (slightly) abusing this relocation type. In particular,
@@ -712,6 +745,52 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
return true;
}
+/* Ensure that the prefixed instruction does not cross a 64-byte boundary. */
+static bool tcg_out_need_prefix_align(TCGContext *s)
+{
+ return ((uintptr_t)s->code_ptr & 0x3f) == 0x3c;
+}
+
+static void tcg_out_prefix_align(TCGContext *s)
+{
+ if (tcg_out_need_prefix_align(s)) {
+ tcg_out32(s, NOP);
+ }
+}
+
+static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target)
+{
+ return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0);
+}
+
+/* Output Type 00 Prefix - 8-Byte Load/Store Form (8LS:D) */
+static void tcg_out_8ls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
+ unsigned ra, tcg_target_long imm, bool r)
+{
+ tcg_insn_unit p, i;
+
+ p = OPCD(1) | (r << 20) | ((imm >> 16) & 0x3ffff);
+ i = opc | TAI(rt, ra, imm);
+
+ tcg_out_prefix_align(s);
+ tcg_out32(s, p);
+ tcg_out32(s, i);
+}
+
+/* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */
+static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
+ unsigned ra, tcg_target_long imm, bool r)
+{
+ tcg_insn_unit p, i;
+
+ p = OPCD(1) | (2 << 24) | (r << 20) | ((imm >> 16) & 0x3ffff);
+ i = opc | TAI(rt, ra, imm);
+
+ tcg_out_prefix_align(s);
+ tcg_out32(s, p);
+ tcg_out32(s, i);
+}
+
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
TCGReg base, tcg_target_long offset);
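The helpers added above implement ISA 3.1 prefixed instructions, which are two words that must not straddle a 64-byte boundary; with 4-byte instruction units the only problematic slot for the prefix is offset 0x3c within a block, which is exactly what tcg_out_need_prefix_align() tests before padding with a NOP. A sketch of the check:

    /* A prefix emitted at the last word slot of a 64-byte block would
     * leave its suffix in the next block, hence the NOP padding. */
    static bool needs_prefix_pad(uintptr_t code_ptr)
    {
        return (code_ptr & 0x3f) == 0x3c;
    }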
@@ -853,6 +932,19 @@ static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
}
+static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
+{
+ uint32_t d0, d1, d2;
+
+ tcg_debug_assert((imm & 0xffff) == 0);
+ tcg_debug_assert(imm == (int32_t)imm);
+
+ d2 = extract32(imm, 16, 1);
+ d1 = extract32(imm, 17, 5);
+ d0 = extract32(imm, 22, 10);
+ tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
+}
+
static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
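tcg_out_addpcis() above scatters the 16-bit immediate into the ADDPCIS encoding's d0/d1/d2 fields; the instruction adds imm << 16 to the address of the next instruction, which is why the low 16 bits are asserted zero. A sketch of the inverse decoding, matching the packing above:

    /* Recover the (bit 16-aligned) immediate from an encoded ADDPCIS. */
    static int32_t addpcis_imm(uint32_t insn)
    {
        uint32_t d0 = (insn >> 6) & 0x3ff;   /* imm bits 31..22 */
        uint32_t d1 = (insn >> 16) & 0x1f;   /* imm bits 21..17 */
        uint32_t d2 = insn & 1;              /* imm bit  16     */
        return (int32_t)(((d0 << 6) | (d1 << 1) | d2) << 16);
    }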
@@ -991,12 +1083,31 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
}
/* Load addresses within the TB with one insn. */
- tb_diff = tcg_tbrel_diff(s, (void *)arg);
+ tb_diff = ppc_tbrel_diff(s, (void *)arg);
if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
return;
}
+ /*
+ * Load values up to 34 bits, and pc-relative addresses,
+ * with one prefixed insn.
+ */
+ if (have_isa_3_10) {
+ if (arg == sextract64(arg, 0, 34)) {
+ /* pli ret,value = paddi ret,0,value,0 */
+ tcg_out_mls_d(s, ADDI, ret, 0, arg, 0);
+ return;
+ }
+
+ tmp = tcg_pcrel_diff_for_prefix(s, (void *)arg);
+ if (tmp == sextract64(tmp, 0, 34)) {
+ /* pla ret,value = paddi ret,0,value,1 */
+ tcg_out_mls_d(s, ADDI, ret, 0, tmp, 1);
+ return;
+ }
+ }
+
/* Load 32-bit immediates with two insns. Note that we've already
eliminated bare ADDIS, so we know both insns are required. */
if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
@@ -1035,6 +1146,19 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
return;
}
+ /* Load addresses within 2GB with 2 insns. */
+ if (have_isa_3_00) {
+ intptr_t hi = tcg_pcrel_diff(s, (void *)arg) - 4;
+ int16_t lo = hi;
+
+ hi -= lo;
+ if (hi == (int32_t)hi) {
+ tcg_out_addpcis(s, TCG_REG_TMP2, hi);
+ tcg_out32(s, ADDI | TAI(ret, TCG_REG_TMP2, lo));
+ return;
+ }
+ }
+
/* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
@@ -1044,10 +1168,21 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
/* Use the constant pool, if possible. */
if (!in_prologue && USE_REG_TB) {
new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
- tcg_tbrel_diff(s, NULL));
+ ppc_tbrel_diff(s, NULL));
tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
return;
}
+ if (have_isa_3_10) {
+ tcg_out_8ls_d(s, PLD, ret, 0, 0, 1);
+ new_pool_label(s, arg, R_PPC64_PCREL34, s->code_ptr - 2, 0);
+ return;
+ }
+ if (have_isa_3_00) {
+ tcg_out_addpcis(s, TCG_REG_TMP2, 0);
+ new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0);
+ tcg_out32(s, LD | TAI(ret, TCG_REG_TMP2, 0));
+ return;
+ }
tmp = arg >> 31 >> 1;
tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
@@ -1104,7 +1239,20 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
*/
if (USE_REG_TB) {
rel = R_PPC_ADDR16;
- add = tcg_tbrel_diff(s, NULL);
+ add = ppc_tbrel_diff(s, NULL);
+ } else if (have_isa_3_10) {
+ if (type == TCG_TYPE_V64) {
+ tcg_out_8ls_d(s, PLXSD, ret & 31, 0, 0, 1);
+ new_pool_label(s, val, R_PPC64_PCREL34, s->code_ptr - 2, 0);
+ } else {
+ tcg_out_8ls_d(s, PLXV, ret & 31, 0, 0, 1);
+ new_pool_l2(s, R_PPC64_PCREL34, s->code_ptr - 2, 0, val, val);
+ }
+ return;
+ } else if (have_isa_3_00) {
+ tcg_out_addpcis(s, TCG_REG_TMP1, 0);
+ rel = R_PPC_REL14;
+ add = 0;
} else {
rel = R_PPC_ADDR32;
add = 0;
@@ -1131,6 +1279,8 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
if (USE_REG_TB) {
tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
load_insn |= RA(TCG_REG_TB);
+ } else if (have_isa_3_00) {
+ tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
} else {
tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
@@ -1322,6 +1472,49 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
break;
}
+ /* For unaligned or large offsets, use the prefixed form. */
+ if (have_isa_3_10
+ && (offset != (int16_t)offset || (offset & align))
+ && offset == sextract64(offset, 0, 34)) {
+ /*
+ * Note that the MLS:D insns retain their un-prefixed opcode,
+ * while the 8LS:D insns use a different opcode space.
+ */
+ switch (opi) {
+ case LBZ:
+ case LHZ:
+ case LHA:
+ case LWZ:
+ case STB:
+ case STH:
+ case STW:
+ case ADDI:
+ tcg_out_mls_d(s, opi, rt, base, offset, 0);
+ return;
+ case LWA:
+ tcg_out_8ls_d(s, PLWA, rt, base, offset, 0);
+ return;
+ case LD:
+ tcg_out_8ls_d(s, PLD, rt, base, offset, 0);
+ return;
+ case STD:
+ tcg_out_8ls_d(s, PSTD, rt, base, offset, 0);
+ return;
+ case LXSD:
+ tcg_out_8ls_d(s, PLXSD, rt & 31, base, offset, 0);
+ return;
+ case STXSD:
+ tcg_out_8ls_d(s, PSTXSD, rt & 31, base, offset, 0);
+ return;
+ case LXV:
+ tcg_out_8ls_d(s, PLXV, rt & 31, base, offset, 0);
+ return;
+ case STXV:
+ tcg_out_8ls_d(s, PSTXV, rt & 31, base, offset, 0);
+ return;
+ }
+ }
+
/* For unaligned, or very large offsets, use the indexed form. */
if (offset & align || offset != (int32_t)offset || opi == 0) {
if (rs == base) {
@@ -2122,152 +2315,158 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
s_bits == MO_128);
a_bits = h->aa.align;
-#ifdef CONFIG_SOFTMMU
- int mem_index = get_mmuidx(oi);
- int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write);
- int fast_off = tlb_mask_table_ofs(s, mem_index);
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
-
- /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
-
- /* Extract the page index, shifted into place for tlb index. */
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_shri32(s, TCG_REG_R0, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- } else {
- tcg_out_shri64(s, TCG_REG_R0, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- }
- tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
+ if (tcg_use_softmmu) {
+ int mem_index = get_mmuidx(oi);
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
- /*
- * Load the (low part) TLB comparator into TMP2.
- * For 64-bit host, always load the entire 64-bit slot for simplicity.
- * We will ignore the high bits with tcg_out_cmp(..., addr_type).
- */
- if (TCG_TARGET_REG_BITS == 64) {
- if (cmp_off == 0) {
- tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;
+
+ /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
+
+ /* Extract the page index, shifted into place for tlb index. */
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_shri32(s, TCG_REG_R0, addrlo,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
- tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
- tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
+ tcg_out_shri64(s, TCG_REG_R0, addrlo,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
}
- } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
- tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
- } else {
- tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
- cmp_off + 4 * HOST_BIG_ENDIAN);
- }
+ tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
- /*
- * Load the TLB addend for use on the fast path.
- * Do this asap to minimize any load use delay.
- */
- if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
- offsetof(CPUTLBEntry, addend));
- }
-
- /* Clear the non-page, non-alignment bits from the address in R0. */
- if (TCG_TARGET_REG_BITS == 32) {
/*
- * We don't support unaligned accesses on 32-bits.
- * Preserve the bottom bits and thus trigger a comparison
- * failure on unaligned accesses.
+ * Load the (low part) TLB comparator into TMP2.
+ * For 64-bit host, always load the entire 64-bit slot for simplicity.
+ * We will ignore the high bits with tcg_out_cmp(..., addr_type).
*/
- if (a_bits < s_bits) {
- a_bits = s_bits;
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (cmp_off == 0) {
+ tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
+ TCG_REG_TMP1, TCG_REG_TMP2));
+ } else {
+ tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
+ TCG_REG_TMP1, TCG_REG_TMP2));
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
+ TCG_REG_TMP1, cmp_off);
+ }
+ } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
+ tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
+ TCG_REG_TMP1, TCG_REG_TMP2));
+ } else {
+ tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
+ cmp_off + 4 * HOST_BIG_ENDIAN);
}
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
- (32 - a_bits) & 31, 31 - s->page_bits);
- } else {
- TCGReg t = addrlo;
/*
- * If the access is unaligned, we need to make sure we fail if we
- * cross a page boundary. The trick is to add the access size-1
- * to the address before masking the low bits. That will make the
- * address overflow to the next page if we cross a page boundary,
- * which will then force a mismatch of the TLB compare.
+ * Load the TLB addend for use on the fast path.
+ * Do this asap to minimize any load use delay.
*/
- if (a_bits < s_bits) {
- unsigned a_mask = (1 << a_bits) - 1;
- unsigned s_mask = (1 << s_bits) - 1;
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
- t = TCG_REG_R0;
+ if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+ offsetof(CPUTLBEntry, addend));
}
- /* Mask the address for the requested alignment. */
- if (addr_type == TCG_TYPE_I32) {
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
+ /* Clear the non-page, non-alignment bits from the address in R0. */
+ if (TCG_TARGET_REG_BITS == 32) {
+ /*
+ * We don't support unaligned accesses on 32-bits.
+ * Preserve the bottom bits and thus trigger a comparison
+ * failure on unaligned accesses.
+ */
+ if (a_bits < s_bits) {
+ a_bits = s_bits;
+ }
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
(32 - a_bits) & 31, 31 - s->page_bits);
- } else if (a_bits == 0) {
- tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
} else {
- tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
- 64 - s->page_bits, s->page_bits - a_bits);
- tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
- }
- }
+ TCGReg t = addrlo;
+
+ /*
+ * If the access is unaligned, we need to make sure we fail if we
+ * cross a page boundary. The trick is to add the access size-1
+ * to the address before masking the low bits. That will make the
+ * address overflow to the next page if we cross a page boundary,
+ * which will then force a mismatch of the TLB compare.
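+ * E.g. a 4-byte access to the last byte of a page gains 3, carries
+ * into the next page, and the masked compare then fails.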
+ */
+ if (a_bits < s_bits) {
+ unsigned a_mask = (1 << a_bits) - 1;
+ unsigned s_mask = (1 << s_bits) - 1;
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
+ t = TCG_REG_R0;
+ }
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
- /* Low part comparison into cr7. */
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
- 0, 7, TCG_TYPE_I32);
+ /* Mask the address for the requested alignment. */
+ if (addr_type == TCG_TYPE_I32) {
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
+ (32 - a_bits) & 31, 31 - s->page_bits);
+ } else if (a_bits == 0) {
+ tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
+ } else {
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
+ 64 - s->page_bits, s->page_bits - a_bits);
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
+ }
+ }
- /* Load the high part TLB comparator into TMP2. */
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
- cmp_off + 4 * !HOST_BIG_ENDIAN);
+ if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
+ /* Low part comparison into cr7. */
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
+ 0, 7, TCG_TYPE_I32);
- /* Load addend, deferred for this case. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
- offsetof(CPUTLBEntry, addend));
+ /* Load the high part TLB comparator into TMP2. */
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
+ cmp_off + 4 * !HOST_BIG_ENDIAN);
- /* High part comparison into cr6. */
- tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
+ /* Load addend, deferred for this case. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
+ offsetof(CPUTLBEntry, addend));
- /* Combine comparisons into cr7. */
- tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
- } else {
- /* Full comparison into cr7. */
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 7, addr_type);
- }
+ /* High part comparison into cr6. */
+ tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
+ 0, 6, TCG_TYPE_I32);
- /* Load a pointer into the current opcode w/conditional branch-link. */
- ldst->label_ptr[0] = s->code_ptr;
- tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
+ /* Combine comparisons into cr7. */
+ tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
+ } else {
+ /* Full comparison into cr7. */
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
+ 0, 7, addr_type);
+ }
- h->base = TCG_REG_TMP1;
-#else
- if (a_bits) {
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ /* Load a pointer into the current opcode w/conditional branch-link. */
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
- /* We are expecting a_bits to max out at 7, much lower than ANDI. */
- tcg_debug_assert(a_bits < 16);
- tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
+ h->base = TCG_REG_TMP1;
+ } else {
+ if (a_bits) {
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;
+
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
+ tcg_debug_assert(a_bits < 16);
+ tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
+
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
+ }
- ldst->label_ptr[0] = s->code_ptr;
- tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
+ h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
}
- h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
-#endif
-
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
/* Zero-extend the guest address for use in the host address. */
tcg_out_ext32u(s, TCG_REG_R0, addrlo);
@@ -2500,18 +2699,13 @@ static void tcg_target_qemu_prologue(TCGContext *s)
}
tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
-#ifndef CONFIG_SOFTMMU
- if (guest_base) {
+ if (!tcg_use_softmmu && guest_base) {
tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
-#endif
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
- if (USE_REG_TB) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
- }
tcg_out32(s, BCCTR | BO_ALWAYS);
/* Epilogue */
@@ -2529,7 +2723,17 @@ static void tcg_target_qemu_prologue(TCGContext *s)
static void tcg_out_tb_start(TCGContext *s)
{
- /* nothing to do */
+ /* Load TCG_REG_TB. */
+ if (USE_REG_TB) {
+ if (have_isa_3_00) {
+ /* lnia REG_TB */
+ tcg_out_addpcis(s, TCG_REG_TB, 0);
+ } else {
+ /* bcl 20,31,$+4 (preferred form for getting nia) */
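+ /* (this form is special-cased so the link stack is not disturbed) */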
+ tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
+ tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
+ }
+ }
}
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
@@ -2541,33 +2745,33 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
static void tcg_out_goto_tb(TCGContext *s, int which)
{
uintptr_t ptr = get_jmp_target_addr(s, which);
+ int16_t lo;
- if (USE_REG_TB) {
- ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
- tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
-
- /* TODO: Use direct branches when possible. */
- set_jmp_insn_offset(s, which);
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
-
- tcg_out32(s, BCCTR | BO_ALWAYS);
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, NOP);
- /* For the unlinked case, need to reset TCG_REG_TB. */
- set_jmp_reset_offset(s, which);
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
- -tcg_current_code_size(s));
+ /* When branch is out of range, fall through to indirect. */
+ if (USE_REG_TB) {
+ ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr);
+ tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
+ } else if (have_isa_3_10) {
+ ptrdiff_t offset = tcg_pcrel_diff_for_prefix(s, (void *)ptr);
+ tcg_out_8ls_d(s, PLD, TCG_REG_TMP1, 0, offset, 1);
+ } else if (have_isa_3_00) {
+ ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4;
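+ /*
+ * ADDPCIS adds a 16-bit immediate shifted left by 16 to the
+ * current address, so split off the sign-extended low 16 bits
+ * and fold them into the load displacement instead.
+ */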
+ lo = offset;
+ tcg_out_addpcis(s, TCG_REG_TMP1, offset - lo);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
} else {
- /* Direct branch will be patched by tb_target_set_jmp_target. */
- set_jmp_insn_offset(s, which);
- tcg_out32(s, NOP);
-
- /* When branch is out of range, fall through to indirect. */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
- tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS);
- set_jmp_reset_offset(s, which);
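+ /* Same low/high split, but with the absolute address of the slot. */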
+ lo = ptr;
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - lo);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
}
+
+ tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+ set_jmp_reset_offset(s, which);
}
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
@@ -2577,10 +2781,6 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
intptr_t diff = addr - jmp_rx;
tcg_insn_unit insn;
- if (USE_REG_TB) {
- return;
- }
-
if (in_range_b(diff)) {
insn = B | (diff & 0x3fffffc);
} else {
@@ -2600,9 +2800,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
switch (opc) {
case INDEX_op_goto_ptr:
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
- if (USE_REG_TB) {
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
- }
tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
tcg_out32(s, BCCTR | BO_ALWAYS);
break;
@@ -3645,7 +3842,7 @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
tcgv_vec_arg(t1), tcgv_vec_arg(t2));
vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
tcgv_vec_arg(v0), tcgv_vec_arg(t1));
- break;
+ break;
case MO_32:
tcg_debug_assert(!have_isa_2_07);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index d6dbcaf3cb..34e10e77d9 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -1245,105 +1245,110 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
a_mask = (1u << aa.align) - 1;
-#ifdef CONFIG_SOFTMMU
- unsigned s_bits = opc & MO_SIZE;
- unsigned s_mask = (1u << s_bits) - 1;
- int mem_index = get_mmuidx(oi);
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
- int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
- int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
- int compare_mask;
- TCGReg addr_adj;
-
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
-
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
-
- tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
- s->page_bits - CPU_TLB_ENTRY_BITS);
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+ if (tcg_use_softmmu) {
+ unsigned s_bits = opc & MO_SIZE;
+ unsigned s_mask = (1u << s_bits) - 1;
+ int mem_index = get_mmuidx(oi);
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+ int compare_mask;
+ TCGReg addr_adj;
- /*
- * For aligned accesses, we check the first byte and include the alignment
- * bits within the address. For unaligned access, we check that we don't
- * cross pages using the address of the last byte of the access.
- */
- addr_adj = addr_reg;
- if (a_mask < s_mask) {
- addr_adj = TCG_REG_TMP0;
- tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
- addr_adj, addr_reg, s_mask - a_mask);
- }
- compare_mask = s->page_mask | a_mask;
- if (compare_mask == sextreg(compare_mask, 0, 12)) {
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
- } else {
- tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
- }
-
- /* Load the tlb comparator and the addend. */
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
- tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
- is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write));
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
- offsetof(CPUTLBEntry, addend));
-
- /* Compare masked address with the TLB entry. */
- ldst->label_ptr[0] = s->code_ptr;
- tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
-
- /* TLB Hit - translate address using addend. */
- if (addr_type != TCG_TYPE_I32) {
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
- } else if (have_zba) {
- tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
- } else {
- tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2);
- }
- *pbase = TCG_REG_TMP0;
-#else
- TCGReg base;
-
- if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
ldst->addrlo_reg = addr_reg;
- /* We are expecting alignment max 7, so we can always use andi. */
- tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
+
+ tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+ /*
+ * For aligned accesses, we check the first byte and include the
+ * alignment bits within the address. For unaligned access, we
+ * check that we don't cross pages using the address of the last
+ * byte of the access.
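+ * A normal TLB comparator has those low bits clear, so a
+ * misaligned address fails the compare and takes the slow path.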
+ */
+ addr_adj = addr_reg;
+ if (a_mask < s_mask) {
+ addr_adj = TCG_REG_TMP0;
+ tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
+ addr_adj, addr_reg, s_mask - a_mask);
+ }
+ compare_mask = s->page_mask | a_mask;
+ if (compare_mask == sextreg(compare_mask, 0, 12)) {
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
+ } else {
+ tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
+ }
+
+ /* Load the tlb comparator and the addend. */
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+ offsetof(CPUTLBEntry, addend));
+ /* Compare masked address with the TLB entry. */
ldst->label_ptr[0] = s->code_ptr;
- tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
- }
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
- if (guest_base != 0) {
- base = TCG_REG_TMP0;
+ /* TLB Hit - translate address using addend. */
if (addr_type != TCG_TYPE_I32) {
- tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
} else if (have_zba) {
- tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
+ tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
+ addr_reg, TCG_REG_TMP2);
} else {
- tcg_out_ext32u(s, base, addr_reg);
- tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
+ tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
+ TCG_REG_TMP0, TCG_REG_TMP2);
}
- } else if (addr_type != TCG_TYPE_I32) {
- base = addr_reg;
+ *pbase = TCG_REG_TMP0;
} else {
- base = TCG_REG_TMP0;
- tcg_out_ext32u(s, base, addr_reg);
+ TCGReg base;
+
+ if (a_mask) {
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+
+ /* We are expecting alignment max 7, so we can always use andi. */
+ tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+ }
+
+ if (guest_base != 0) {
+ base = TCG_REG_TMP0;
+ if (addr_type != TCG_TYPE_I32) {
+ tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
+ TCG_GUEST_BASE_REG);
+ } else if (have_zba) {
+ tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
+ TCG_GUEST_BASE_REG);
+ } else {
+ tcg_out_ext32u(s, base, addr_reg);
+ tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
+ }
+ } else if (addr_type != TCG_TYPE_I32) {
+ base = addr_reg;
+ } else {
+ base = TCG_REG_TMP0;
+ tcg_out_ext32u(s, base, addr_reg);
+ }
+ *pbase = base;
}
- *pbase = base;
-#endif
return ldst;
}
@@ -2075,10 +2080,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
}
-#if !defined(CONFIG_SOFTMMU)
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
- tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
-#endif
+ if (!tcg_use_softmmu && guest_base) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
+ tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
+ }
/* Call generated code */
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 4ef9ac3d5b..fbee43d3b0 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -46,9 +46,7 @@
/* A scratch register that may be used throughout the backend. */
#define TCG_TMP0 TCG_REG_R1
-#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
-#endif
/* All of the following instructions are prefixed with their instruction
format, and are defined as 8- or 16-bit quantities, even when the two
@@ -1768,94 +1766,95 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;
-#ifdef CONFIG_SOFTMMU
- unsigned s_mask = (1 << s_bits) - 1;
- int mem_index = get_mmuidx(oi);
- int fast_off = tlb_mask_table_ofs(s, mem_index);
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
- int ofs, a_off;
- uint64_t tlb_mask;
+ if (tcg_use_softmmu) {
+ unsigned s_mask = (1 << s_bits) - 1;
+ int mem_index = get_mmuidx(oi);
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
+ int ofs, a_off;
+ uint64_t tlb_mask;
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
- tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
+ s->page_bits - CPU_TLB_ENTRY_BITS);
- tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
- tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
+ tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
+ tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
- /*
- * For aligned accesses, we check the first byte and include the alignment
- * bits within the address. For unaligned access, we check that we don't
- * cross pages using the address of the last byte of the access.
- */
- a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
- tlb_mask = (uint64_t)s->page_mask | a_mask;
- if (a_off == 0) {
- tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
- } else {
- tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
- tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
- }
+ /*
+ * For aligned accesses, we check the first byte and include the
+ * alignment bits within the address. For unaligned access, we
+ * check that we don't cross pages using the address of the last
+ * byte of the access.
+ */
+ a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
+ tlb_mask = (uint64_t)s->page_mask | a_mask;
+ if (a_off == 0) {
+ tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
+ } else {
+ tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
+ tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
+ }
- if (is_ld) {
- ofs = offsetof(CPUTLBEntry, addr_read);
- } else {
- ofs = offsetof(CPUTLBEntry, addr_write);
- }
- if (addr_type == TCG_TYPE_I32) {
- ofs += HOST_BIG_ENDIAN * 4;
- tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
- } else {
- tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
- }
+ if (is_ld) {
+ ofs = offsetof(CPUTLBEntry, addr_read);
+ } else {
+ ofs = offsetof(CPUTLBEntry, addr_write);
+ }
+ if (addr_type == TCG_TYPE_I32) {
+ ofs += HOST_BIG_ENDIAN * 4;
+ tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
+ } else {
+ tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
+ }
- tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
- ldst->label_ptr[0] = s->code_ptr++;
+ tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
+ ldst->label_ptr[0] = s->code_ptr++;
- h->index = TCG_TMP0;
- tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
- offsetof(CPUTLBEntry, addend));
+ h->index = TCG_TMP0;
+ tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
+ offsetof(CPUTLBEntry, addend));
- if (addr_type == TCG_TYPE_I32) {
- tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
- h->base = TCG_REG_NONE;
+ if (addr_type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
+ h->base = TCG_REG_NONE;
+ } else {
+ h->base = addr_reg;
+ }
+ h->disp = 0;
} else {
- h->base = addr_reg;
- }
- h->disp = 0;
-#else
- if (a_mask) {
- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ if (a_mask) {
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
- /* We are expecting a_bits to max out at 7, much lower than TMLL. */
- tcg_debug_assert(a_mask <= 0xffff);
- tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
+ /* We are expecting a_bits to max out at 7, much lower than TMLL. */
+ tcg_debug_assert(a_mask <= 0xffff);
+ tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
- tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
- ldst->label_ptr[0] = s->code_ptr++;
- }
+ tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
+ ldst->label_ptr[0] = s->code_ptr++;
+ }
- h->base = addr_reg;
- if (addr_type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_TMP0, addr_reg);
- h->base = TCG_TMP0;
- }
- if (guest_base < 0x80000) {
- h->index = TCG_REG_NONE;
- h->disp = guest_base;
- } else {
- h->index = TCG_GUEST_BASE_REG;
- h->disp = 0;
+ h->base = addr_reg;
+ if (addr_type == TCG_TYPE_I32) {
+ tcg_out_ext32u(s, TCG_TMP0, addr_reg);
+ h->base = TCG_TMP0;
+ }
+ if (guest_base < 0x80000) {
+ h->index = TCG_REG_NONE;
+ h->disp = guest_base;
+ } else {
+ h->index = TCG_GUEST_BASE_REG;
+ h->disp = 0;
+ }
}
-#endif
return ldst;
}
@@ -3453,12 +3452,10 @@ static void tcg_target_qemu_prologue(TCGContext *s)
TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
CPU_TEMP_BUF_NLONGS * sizeof(long));
-#ifndef CONFIG_SOFTMMU
- if (guest_base >= 0x80000) {
+ if (!tcg_use_softmmu && guest_base >= 0x80000) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
-#endif
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index df4f22c427..e2c55df217 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -34,13 +34,13 @@
static void check_max_alignment(unsigned a_bits)
{
-#if defined(CONFIG_SOFTMMU)
/*
* The requested alignment cannot overlap the TLB flags.
* FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
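 * (The constant 5 below mirrors the number of TLB_* flag bits that
 * live below the page bits.)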
*/
- tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
-#endif
+ if (tcg_use_softmmu) {
+ tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
+ }
}
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
@@ -411,10 +411,11 @@ void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
*/
static bool use_two_i64_for_i128(MemOp mop)
{
-#ifdef CONFIG_SOFTMMU
/* Two softmmu tlb lookups are larger than one function call. */
- return false;
-#else
+ if (tcg_use_softmmu) {
+ return false;
+ }
+
/*
* For user-only, two 64-bit operations may well be smaller than a call.
* Determine if that would be legal for the requested atomicity.
@@ -432,7 +433,6 @@ static bool use_two_i64_for_i128(MemOp mop)
default:
g_assert_not_reached();
}
-#endif
}
static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
@@ -714,7 +714,7 @@ void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx,
tcg_gen_qemu_st_i128_int(val, addr, idx, memop);
}
-static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
+void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
{
switch (opc & MO_SSIZE) {
case MO_SB:
@@ -729,13 +729,16 @@ static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc)
case MO_UW:
tcg_gen_ext16u_i32(ret, val);
break;
- default:
+ case MO_UL:
+ case MO_SL:
tcg_gen_mov_i32(ret, val);
break;
+ default:
+ g_assert_not_reached();
}
}
-static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
+void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
{
switch (opc & MO_SSIZE) {
case MO_SB:
@@ -756,9 +759,12 @@ static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
case MO_UL:
tcg_gen_ext32u_i64(ret, val);
break;
- default:
+ case MO_UQ:
+ case MO_SQ:
tcg_gen_mov_i64(ret, val);
break;
+ default:
+ g_assert_not_reached();
}
}
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 393dbcd01c..828eb9ee46 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -291,6 +291,12 @@ void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
}
}
+void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret,
+ TCGv_i32 arg1, int32_t arg2)
+{
+ tcg_gen_negsetcond_i32(cond, ret, arg1, tcg_constant_i32(arg2));
+}
+
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
if (arg2 == 0) {
@@ -342,8 +348,8 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- tcg_gen_movi_i32(t0, 0);
- tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
+ TCGv_i32 zero = tcg_constant_i32(0);
+ tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2);
tcg_temp_free_i32(t0);
} else {
gen_helper_divu_i32(ret, arg1, arg2);
@@ -362,8 +368,8 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_temp_free_i32(t0);
} else if (TCG_TARGET_HAS_div2_i32) {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
- tcg_gen_movi_i32(t0, 0);
- tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
+ TCGv_i32 zero = tcg_constant_i32(0);
+ tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2);
tcg_temp_free_i32(t0);
} else {
gen_helper_remu_i32(ret, arg1, arg2);
@@ -1602,6 +1608,12 @@ void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
}
}
+void tcg_gen_negsetcondi_i64(TCGCond cond, TCGv_i64 ret,
+ TCGv_i64 arg1, int64_t arg2)
+{
+ tcg_gen_negsetcond_i64(cond, ret, arg1, tcg_constant_i64(arg2));
+}
+
void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
TCGv_i64 arg1, TCGv_i64 arg2)
{
@@ -1674,8 +1686,8 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- tcg_gen_movi_i64(t0, 0);
- tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
+ TCGv_i64 zero = tcg_constant_i64(0);
+ tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2);
tcg_temp_free_i64(t0);
} else {
gen_helper_divu_i64(ret, arg1, arg2);
@@ -1694,8 +1706,8 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_temp_free_i64(t0);
} else if (TCG_TARGET_HAS_div2_i64) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
- tcg_gen_movi_i64(t0, 0);
- tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
+ TCGv_i64 zero = tcg_constant_i64(0);
+ tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2);
tcg_temp_free_i64(t0);
} else {
gen_helper_remu_i64(ret, arg1, arg2);
@@ -2880,6 +2892,28 @@ void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src)
}
}
+void tcg_gen_ld_i128(TCGv_i128 ret, TCGv_ptr base, tcg_target_long offset)
+{
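+ /* Order the halves to match the host's in-memory 128-bit layout. */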
+ if (HOST_BIG_ENDIAN) {
+ tcg_gen_ld_i64(TCGV128_HIGH(ret), base, offset);
+ tcg_gen_ld_i64(TCGV128_LOW(ret), base, offset + 8);
+ } else {
+ tcg_gen_ld_i64(TCGV128_LOW(ret), base, offset);
+ tcg_gen_ld_i64(TCGV128_HIGH(ret), base, offset + 8);
+ }
+}
+
+void tcg_gen_st_i128(TCGv_i128 val, TCGv_ptr base, tcg_target_long offset)
+{
+ if (HOST_BIG_ENDIAN) {
+ tcg_gen_st_i64(TCGV128_HIGH(val), base, offset);
+ tcg_gen_st_i64(TCGV128_LOW(val), base, offset + 8);
+ } else {
+ tcg_gen_st_i64(TCGV128_LOW(val), base, offset);
+ tcg_gen_st_i64(TCGV128_HIGH(val), base, offset + 8);
+ }
+}
+
/* QEMU specific operations. */
void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 637b9e6870..35158a0846 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -178,6 +178,10 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
+#ifndef CONFIG_USER_ONLY
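+/*
+ * In system mode there is no guest_base; poison the identifier so any
+ * use that the compiler cannot eliminate fails at build time.
+ */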
+#define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; })
+#endif
+
typedef struct TCGLdstHelperParam {
TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
unsigned ntmp;
@@ -226,6 +230,10 @@ static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
MemOp host_atom, bool allow_two_ops)
__attribute__((unused));
+#ifdef CONFIG_USER_ONLY
+bool tcg_use_softmmu;
+#endif
+
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
@@ -404,13 +412,12 @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}
-#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
-static int tlb_mask_table_ofs(TCGContext *s, int which)
+static int __attribute__((unused))
+tlb_mask_table_ofs(TCGContext *s, int which)
{
return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
sizeof(CPUNegativeOffsetState));
}
-#endif
/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
diff --git a/tests/avocado/s390_topology.py b/tests/avocado/s390_topology.py
new file mode 100644
index 0000000000..9154ac8776
--- /dev/null
+++ b/tests/avocado/s390_topology.py
@@ -0,0 +1,439 @@
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright IBM Corp. 2023
+#
+# Author:
+# Pierre Morel <pmorel@linux.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+import shutil
+import time
+
+from avocado_qemu import QemuSystemTest
+from avocado_qemu import exec_command
+from avocado_qemu import exec_command_and_wait_for_pattern
+from avocado_qemu import interrupt_interactive_console_until_pattern
+from avocado_qemu import wait_for_console_pattern
+from avocado.utils import process
+from avocado.utils import archive
+
+
+class S390CPUTopology(QemuSystemTest):
+ """
+ S390x CPU topology consists of 4 layers which are, from bottom
+ to top: cores, sockets, books and drawers, plus 2 modifier
+ attributes: entitlement and dedication.
+ See: docs/system/s390x/cpu-topology.rst.
+
+ S390x CPU topology is set up in different ways:
+ - implicitly from the '-smp' argument by completing each topology
+ level one after the other beginning with drawer 0, book 0 and
+ socket 0.
+ - explicitly from the '-device' argument on the QEMU command line
+ - explicitly by hotplug of a new CPU using QMP or HMP
+ - dynamically, by modifying it with the QMP command 'set-cpu-topology'
+
+ The S390x modifier attribute entitlement depends on the machine
+ polarization, which can be horizontal or vertical.
+ The polarization is changed at the guest's request.
+ """
+ timeout = 90
+ event_timeout = 10
+
+ KERNEL_COMMON_COMMAND_LINE = ('printk.time=0 '
+ 'root=/dev/ram '
+ 'selinux=0 '
+ 'rdinit=/bin/sh')
+
+ def wait_until_booted(self):
+ wait_for_console_pattern(self, 'no job control',
+ failure_message='Kernel panic - not syncing',
+ vm=None)
+
+ def check_topology(self, c, s, b, d, e, t):
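+ """Check the topology of the CPU with core-id c: socket s,
+ book b, drawer d, entitlement e and dedication t."""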
+ res = self.vm.qmp('query-cpus-fast')
+ cpus = res['return']
+ for cpu in cpus:
+ core = cpu['props']['core-id']
+ socket = cpu['props']['socket-id']
+ book = cpu['props']['book-id']
+ drawer = cpu['props']['drawer-id']
+ entitlement = cpu.get('entitlement')
+ dedicated = cpu.get('dedicated')
+ if core == c:
+ self.assertEqual(drawer, d)
+ self.assertEqual(book, b)
+ self.assertEqual(socket, s)
+ self.assertEqual(entitlement, e)
+ self.assertEqual(dedicated, t)
+
+ def kernel_init(self):
+ """
+ We need a VM that supports CPU topology;
+ currently this is only the case when using KVM, not TCG.
+ We need a kernel supporting the CPU topology.
+ We need a minimal root filesystem with a shell.
+ """
+ self.require_accelerator("kvm")
+ kernel_url = ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/35/Server/s390x/os'
+ '/images/kernel.img')
+ kernel_hash = '0d1aaaf303f07cf0160c8c48e56fe638'
+ kernel_path = self.fetch_asset(kernel_url, algorithm='md5',
+ asset_hash=kernel_hash)
+
+ initrd_url = ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/35/Server/s390x/os'
+ '/images/initrd.img')
+ initrd_hash = 'a122057d95725ac030e2ec51df46e172'
+ initrd_path_xz = self.fetch_asset(initrd_url, algorithm='md5',
+ asset_hash=initrd_hash)
+ initrd_path = os.path.join(self.workdir, 'initrd-raw.img')
+ archive.lzma_uncompress(initrd_path_xz, initrd_path)
+
+ self.vm.set_console()
+ kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+ self.vm.add_args('-nographic',
+ '-enable-kvm',
+ '-cpu', 'max,ctop=on',
+ '-m', '512',
+ '-kernel', kernel_path,
+ '-initrd', initrd_path,
+ '-append', kernel_command_line)
+
+ def system_init(self):
+ self.log.info("System init")
+ exec_command_and_wait_for_pattern(self,
+ """ mount proc -t proc /proc;
+ mount sys -t sysfs /sys;
+ cat /sys/devices/system/cpu/dispatching """,
+ '0')
+
+ def test_single(self):
+ """
+ This test checks the simplest topology with a single CPU.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ def test_default(self):
+ """
+ This test checks the implicit topology.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.launch()
+ self.wait_until_booted()
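+ # With 2 cores per socket, 3 sockets per book and 2 books per
+ # drawer, the 13 CPUs fill drawer 0 socket by socket across both
+ # books; CPU 12 then starts drawer 1.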
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+ self.check_topology(1, 0, 0, 0, 'medium', False)
+ self.check_topology(2, 1, 0, 0, 'medium', False)
+ self.check_topology(3, 1, 0, 0, 'medium', False)
+ self.check_topology(4, 2, 0, 0, 'medium', False)
+ self.check_topology(5, 2, 0, 0, 'medium', False)
+ self.check_topology(6, 0, 1, 0, 'medium', False)
+ self.check_topology(7, 0, 1, 0, 'medium', False)
+ self.check_topology(8, 1, 1, 0, 'medium', False)
+ self.check_topology(9, 1, 1, 0, 'medium', False)
+ self.check_topology(10, 2, 1, 0, 'medium', False)
+ self.check_topology(11, 2, 1, 0, 'medium', False)
+ self.check_topology(12, 0, 0, 1, 'medium', False)
+
+ def test_move(self):
+ """
+ This test checks the topology modification by moving a CPU
+ to another socket: CPU 0 is moved from socket 0 to socket 2.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'socket-id': 2, 'entitlement': 'low'})
+ self.assertEqual(res['return'], {})
+ self.check_topology(0, 2, 0, 0, 'low', False)
+
+ def test_dash_device(self):
+ """
+ This test verifies that a CPU defined with the '-device'
+ command line option finds its right place inside the topology.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.add_args('-device', 'max-s390x-cpu,core-id=10')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=1,socket-id=0,book-id=1,drawer-id=1,entitlement=low')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=2,socket-id=0,book-id=1,drawer-id=1,entitlement=medium')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=3,socket-id=1,book-id=1,drawer-id=1,entitlement=high')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=4,socket-id=1,book-id=1,drawer-id=1')
+ self.vm.add_args('-device',
+ 'max-s390x-cpu,'
+ 'core-id=5,socket-id=2,book-id=1,drawer-id=1,dedicated=true')
+
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.check_topology(10, 2, 1, 0, 'medium', False)
+ self.check_topology(1, 0, 1, 1, 'low', False)
+ self.check_topology(2, 0, 1, 1, 'medium', False)
+ self.check_topology(3, 1, 1, 1, 'high', False)
+ self.check_topology(4, 1, 1, 1, 'medium', False)
+ self.check_topology(5, 2, 1, 1, 'high', True)
+
+
+ def guest_set_dispatching(self, dispatching):
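+ """Ask the guest for horizontal (0) or vertical (1) polarization
+ and wait for the matching QEMU CPU_POLARIZATION_CHANGE event."""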
+ exec_command(self,
+ f'echo {dispatching} > /sys/devices/system/cpu/dispatching')
+ self.vm.event_wait('CPU_POLARIZATION_CHANGE', self.event_timeout)
+ exec_command_and_wait_for_pattern(self,
+ 'cat /sys/devices/system/cpu/dispatching', dispatching)
+
+
+ def test_polarization(self):
+ """
+ This test verifies that QEMU reports the correct polarization
+ after several guest polarization change requests.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+ res = self.vm.qmp('query-s390x-cpu-polarization')
+ self.assertEqual(res['return']['polarization'], 'horizontal')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ self.guest_set_dispatching('1')
+ res = self.vm.qmp('query-s390x-cpu-polarization')
+ self.assertEqual(res['return']['polarization'], 'vertical')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ self.guest_set_dispatching('0')
+ res = self.vm.qmp('query-s390x-cpu-polarization')
+ self.assertEqual(res['return']['polarization'], 'horizontal')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+
+ def check_polarization(self, polarization):
+ # We need to wait for the change to be propagated to the kernel.
+ exec_command_and_wait_for_pattern(self,
+ "\n".join([
+ "timeout 1 sh -c 'while true",
+ 'do',
+ ' syspath="/sys/devices/system/cpu/cpu0/polarization"',
+ ' polarization="$(cat "$syspath")" || exit',
+ f' if [ "$polarization" = "{polarization}" ]; then',
+ ' exit 0',
+ ' fi',
+ ' sleep 0.01',
+ # The strings we wait for must not appear verbatim in the command; '' splits them.
+ "done' && echo succ''ess || echo fail''ure",
+ ]),
+ "success", "failure")
+
+
+ def test_entitlement(self):
+ """
+ This test verifies that QEMU modifies the entitlement
+ after a guest request and that the guest sees the change.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ self.check_polarization('horizontal')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ self.guest_set_dispatching('1')
+ self.check_polarization('vertical:medium')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low'})
+ self.assertEqual(res['return'], {})
+ self.check_polarization('vertical:low')
+ self.check_topology(0, 0, 0, 0, 'low', False)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium'})
+ self.assertEqual(res['return'], {})
+ self.check_polarization('vertical:medium')
+ self.check_topology(0, 0, 0, 0, 'medium', False)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'high'})
+ self.assertEqual(res['return'], {})
+ self.check_polarization('vertical:high')
+ self.check_topology(0, 0, 0, 0, 'high', False)
+
+ self.guest_set_dispatching('0')
+ self.check_polarization("horizontal")
+ self.check_topology(0, 0, 0, 0, 'high', False)
+
+
+ def test_dedicated(self):
+ """
+ This test verifies that QEMU adjusts the entitlement correctly when a
+ CPU is made dedicated.
+ QEMU retains the entitlement value when horizontal polarization is in effect.
+ For the guest, the field shows the effective value of the entitlement.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ self.check_polarization("horizontal")
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'dedicated': True})
+ self.assertEqual(res['return'], {})
+ self.check_topology(0, 0, 0, 0, 'high', True)
+ self.check_polarization("horizontal")
+
+ self.guest_set_dispatching('1')
+ self.check_topology(0, 0, 0, 0, 'high', True)
+ self.check_polarization("vertical:high")
+
+ self.guest_set_dispatching('0')
+ self.check_topology(0, 0, 0, 0, 'high', True)
+ self.check_polarization("horizontal")
+
+
+ def test_socket_full(self):
+ """
+ This test verifies that QEMU refuses to overload a socket.
+ Socket-id 0 on book-id 0 already contains CPUs 0 and 1 and cannot
+ accept any new CPU, while socket-id 0 on book-id 1 is free.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.add_args('-smp',
+ '3,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 2, 'socket-id': 0, 'book-id': 0})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 2, 'socket-id': 0, 'book-id': 1})
+ self.assertEqual(res['return'], {})
+
+ def test_dedicated_error(self):
+ """
+ This test verifies that QEMU refuses to lower the entitlement
+ of a dedicated CPU.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'dedicated': True})
+ self.assertEqual(res['return'], {})
+
+ self.check_topology(0, 0, 0, 0, 'high', True)
+
+ self.guest_set_dispatching('1')
+
+ self.check_topology(0, 0, 0, 0, 'high', True)
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low', 'dedicated': True})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low'})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium', 'dedicated': True})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium'})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'low', 'dedicated': False})
+ self.assertEqual(res['return'], {})
+
+ res = self.vm.qmp('set-cpu-topology',
+ {'core-id': 0, 'entitlement': 'medium', 'dedicated': False})
+ self.assertEqual(res['return'], {})
+
+ def test_move_error(self):
+ """
+ This test verifies that QEMU refuses to move a CPU to a
+ nonexistent location.
+
+ :avocado: tags=arch:s390x
+ :avocado: tags=machine:s390-ccw-virtio
+ """
+ self.kernel_init()
+ self.vm.launch()
+ self.wait_until_booted()
+
+ self.system_init()
+
+ res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'drawer-id': 1})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'book-id': 1})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'socket-id': 1})
+ self.assertEqual(res['error']['class'], 'GenericError')
+
+ self.check_topology(0, 0, 0, 0, 'medium', False)
diff --git a/tests/data/acpi/q35/DSDT.cxl b/tests/data/acpi/q35/DSDT.cxl
index ee16a861c2..145301c52a 100644
--- a/tests/data/acpi/q35/DSDT.cxl
+++ b/tests/data/acpi/q35/DSDT.cxl
Binary files differ
diff --git a/tests/qtest/cdrom-test.c b/tests/qtest/cdrom-test.c
index f2a8d91929..0945383789 100644
--- a/tests/qtest/cdrom-test.c
+++ b/tests/qtest/cdrom-test.c
@@ -37,17 +37,17 @@ static int exec_xorrisofs(const char **args)
return exit_status;
}
-static int prepare_image(const char *arch, char *isoimage)
+static int prepare_image(const char *arch, char *isoimagepath)
{
char srcdir[] = "cdrom-test-dir-XXXXXX";
char *codefile = NULL;
int ifh, ret = -1;
const char *args[] = {
"xorrisofs", "-quiet", "-l", "-no-emul-boot",
- "-b", NULL, "-o", isoimage, srcdir, NULL
+ "-b", NULL, "-o", isoimagepath, srcdir, NULL
};
- ifh = mkstemp(isoimage);
+ ifh = mkstemp(isoimagepath);
if (ifh < 0) {
perror("Error creating temporary iso image file");
return -1;
diff --git a/tests/qtest/ipmi-bt-test.c b/tests/qtest/ipmi-bt-test.c
index ed431e34e6..383239bcd4 100644
--- a/tests/qtest/ipmi-bt-test.c
+++ b/tests/qtest/ipmi-bt-test.c
@@ -411,7 +411,7 @@ int main(int argc, char **argv)
g_test_init(&argc, &argv, NULL);
global_qtest = qtest_initf(
- " -chardev socket,id=ipmi0,host=localhost,port=%d,reconnect=10"
+ " -chardev socket,id=ipmi0,host=127.0.0.1,port=%d,reconnect=10"
" -device ipmi-bmc-extern,chardev=ipmi0,id=bmc0"
" -device isa-ipmi-bt,bmc=bmc0", emu_port);
qtest_irq_intercept_in(global_qtest, "ioapic");
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index dc7a55634c..f33a210861 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -91,6 +91,7 @@ struct QTestState
static GHookList abrt_hooks;
static void (*sighandler_old)(int);
+static bool silence_spawn_log;
static int qtest_query_target_endianness(QTestState *s);
@@ -336,10 +337,17 @@ void qtest_remove_abrt_handler(void *data)
}
}
-static const char *qtest_qemu_binary(void)
+static const char *qtest_qemu_binary(const char *var)
{
const char *qemu_bin;
+ if (var) {
+ qemu_bin = getenv(var);
+ if (qemu_bin) {
+ return qemu_bin;
+ }
+ }
+
qemu_bin = getenv("QTEST_QEMU_BINARY");
if (!qemu_bin) {
fprintf(stderr, "Environment variable QTEST_QEMU_BINARY required\n");
@@ -381,7 +389,8 @@ static pid_t qtest_create_process(char *cmd)
}
#endif /* _WIN32 */
-static QTestState *G_GNUC_PRINTF(1, 2) qtest_spawn_qemu(const char *fmt, ...)
+static QTestState *G_GNUC_PRINTF(2, 3) qtest_spawn_qemu(const char *qemu_bin,
+ const char *fmt, ...)
{
va_list ap;
QTestState *s = g_new0(QTestState, 1);
@@ -391,14 +400,15 @@ static QTestState *G_GNUC_PRINTF(1, 2) qtest_spawn_qemu(const char *fmt, ...)
g_autoptr(GString) command = g_string_new("");
va_start(ap, fmt);
- g_string_append_printf(command, CMD_EXEC "%s %s",
- qtest_qemu_binary(), tracearg);
+ g_string_append_printf(command, CMD_EXEC "%s %s", qemu_bin, tracearg);
g_string_append_vprintf(command, fmt, ap);
va_end(ap);
qtest_add_abrt_handler(kill_qemu_hook_func, s);
- g_test_message("starting QEMU: %s", command->str);
+ if (!silence_spawn_log) {
+ g_test_message("starting QEMU: %s", command->str);
+ }
#ifndef _WIN32
s->qemu_pid = fork();
@@ -431,7 +441,8 @@ static QTestState *G_GNUC_PRINTF(1, 2) qtest_spawn_qemu(const char *fmt, ...)
return s;
}
-QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
+static QTestState *qtest_init_internal(const char *qemu_bin,
+ const char *extra_args)
{
QTestState *s;
int sock, qmpsock, i;
@@ -456,7 +467,8 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
sock = init_socket(socket_path);
qmpsock = init_socket(qmp_socket_path);
- s = qtest_spawn_qemu("-qtest unix:%s "
+ s = qtest_spawn_qemu(qemu_bin,
+ "-qtest unix:%s "
"-qtest-log %s "
"-chardev socket,path=%s,id=char0 "
"-mon chardev=char0,mode=control "
@@ -509,9 +521,14 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
return s;
}
-QTestState *qtest_init(const char *extra_args)
+QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
{
- QTestState *s = qtest_init_without_qmp_handshake(extra_args);
+ return qtest_init_internal(qtest_qemu_binary(NULL), extra_args);
+}
+
+QTestState *qtest_init_with_env(const char *var, const char *extra_args)
+{
+ QTestState *s = qtest_init_internal(qtest_qemu_binary(var), extra_args);
QDict *greeting;
/* Read the QMP greeting and then do the handshake */
@@ -522,6 +539,11 @@ QTestState *qtest_init(const char *extra_args)
return s;
}
+QTestState *qtest_init(const char *extra_args)
+{
+ return qtest_init_with_env(NULL, extra_args);
+}
+
QTestState *qtest_vinitf(const char *fmt, va_list ap)
{
char *args = g_strdup_vprintf(fmt, ap);
@@ -905,7 +927,7 @@ char *qtest_hmp(QTestState *s, const char *fmt, ...)
const char *qtest_get_arch(void)
{
- const char *qemu = qtest_qemu_binary();
+ const char *qemu = qtest_qemu_binary(NULL);
const char *end = strrchr(qemu, '-');
if (!end) {
@@ -1449,13 +1471,26 @@ struct MachInfo {
char *alias;
};
+static void qtest_free_machine_list(struct MachInfo *machines)
+{
+ if (machines) {
+ for (int i = 0; machines[i].name != NULL; i++) {
+ g_free(machines[i].name);
+ g_free(machines[i].alias);
+ }
+
+ g_free(machines);
+ }
+}
+
/*
* Returns an array with pointers to the available machine names.
* The terminating entry has the name set to NULL.
*/
-static struct MachInfo *qtest_get_machines(void)
+static struct MachInfo *qtest_get_machines(const char *var)
{
static struct MachInfo *machines;
+ static char *qemu_var;
QDict *response, *minfo;
QList *list;
const QListEntry *p;
@@ -1464,11 +1499,21 @@ static struct MachInfo *qtest_get_machines(void)
QTestState *qts;
int idx;
+ if (g_strcmp0(qemu_var, var)) {
+ g_free(qemu_var);
+ qemu_var = g_strdup(var);
+
+ /* new qemu, clear the cache */
+ qtest_free_machine_list(machines);
+ machines = NULL;
+ }
+
if (machines) {
return machines;
}
- qts = qtest_init("-machine none");
+ silence_spawn_log = !g_test_verbose();
+
+ qts = qtest_init_with_env(qemu_var, "-machine none");
response = qtest_qmp(qts, "{ 'execute': 'query-machines' }");
g_assert(response);
list = qdict_get_qlist(response, "return");
@@ -1499,6 +1544,8 @@ static struct MachInfo *qtest_get_machines(void)
qtest_quit(qts);
qobject_unref(response);
+ silence_spawn_log = false;
+
memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */
return machines;
}
@@ -1509,7 +1556,7 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine),
struct MachInfo *machines;
int i;
- machines = qtest_get_machines();
+ machines = qtest_get_machines(NULL);
for (i = 0; machines[i].name != NULL; i++) {
/* Ignore machines that cannot be used for qtests */
@@ -1525,12 +1572,28 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine),
}
}
-bool qtest_has_machine(const char *machine)
+char *qtest_resolve_machine_alias(const char *var, const char *alias)
{
struct MachInfo *machines;
int i;
- machines = qtest_get_machines();
+ machines = qtest_get_machines(var);
+
+ for (i = 0; machines[i].name != NULL; i++) {
+ if (machines[i].alias && g_str_equal(alias, machines[i].alias)) {
+ return g_strdup(machines[i].name);
+ }
+ }
+
+ return NULL;
+}
+
+bool qtest_has_machine_with_env(const char *var, const char *machine)
+{
+ struct MachInfo *machines;
+ int i;
+
+ machines = qtest_get_machines(var);
for (i = 0; machines[i].name != NULL; i++) {
if (g_str_equal(machine, machines[i].name) ||
@@ -1542,6 +1605,11 @@ bool qtest_has_machine(const char *machine)
return false;
}
+bool qtest_has_machine(const char *machine)
+{
+ return qtest_has_machine_with_env(NULL, machine);
+}
+
bool qtest_has_device(const char *device)
{
static QList *list;
diff --git a/tests/qtest/libqtest.h b/tests/qtest/libqtest.h
index 5fe3d13466..6e3d3525bf 100644
--- a/tests/qtest/libqtest.h
+++ b/tests/qtest/libqtest.h
@@ -56,6 +56,19 @@ QTestState *qtest_vinitf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0);
QTestState *qtest_init(const char *extra_args);
/**
+ * qtest_init_with_env:
+ * @var: Environment variable from where to take the QEMU binary
+ * @extra_args: Other arguments to pass to QEMU. CAUTION: these
+ * arguments are subject to word splitting and shell evaluation.
+ *
+ * Like qtest_init(), but use a different environment variable for the
+ * QEMU binary.
+ *
+ * Returns: #QTestState instance.
+ */
+QTestState *qtest_init_with_env(const char *var, const char *extra_args);
+
+/**
* qtest_init_without_qmp_handshake:
* @extra_args: other arguments to pass to QEMU. CAUTION: these
* arguments are subject to word splitting and shell evaluation.
@@ -910,6 +923,16 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine),
bool skip_old_versioned);
/**
+ * qtest_resolve_machine_alias:
+ * @var: Environment variable from where to take the QEMU binary
+ * @alias: The alias to resolve
+ *
+ * Returns: the machine type corresponding to the alias if any,
+ * otherwise NULL.
+ */
+char *qtest_resolve_machine_alias(const char *var, const char *alias);
+
+/**
* qtest_has_machine:
* @machine: The machine to look for
*
@@ -918,6 +941,15 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine),
bool qtest_has_machine(const char *machine);
/**
+ * qtest_has_machine_with_env:
+ * @var: Environment variable from where to take the QEMU binary
+ * @machine: The machine to look for
+ *
+ * Returns: true if the machine is available in the specified binary.
+ */
+bool qtest_has_machine_with_env(const char *var, const char *machine);
+
+/**
* qtest_has_device:
* @device: The device to look for
*
diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c
index 0c185db450..24fb7b3525 100644
--- a/tests/qtest/migration-helpers.c
+++ b/tests/qtest/migration-helpers.c
@@ -11,6 +11,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/ctype.h"
#include "qapi/qmp/qjson.h"
#include "migration-helpers.h"
@@ -240,3 +241,54 @@ void wait_for_migration_fail(QTestState *from, bool allow_active)
g_assert(qdict_get_bool(rsp_return, "running"));
qobject_unref(rsp_return);
}
+
+char *find_common_machine_version(const char *mtype, const char *var1,
+ const char *var2)
+{
+ g_autofree char *type1 = qtest_resolve_machine_alias(var1, mtype);
+ g_autofree char *type2 = qtest_resolve_machine_alias(var2, mtype);
+
+ g_assert(type1 && type2);
+
+ if (g_str_equal(type1, type2)) {
+ /* either can be used */
+ return g_strdup(type1);
+ }
+
+ if (qtest_has_machine_with_env(var2, type1)) {
+ return g_strdup(type1);
+ }
+
+ if (qtest_has_machine_with_env(var1, type2)) {
+ return g_strdup(type2);
+ }
+
+ g_test_message("No common machine version for machine type '%s' between "
+ "binaries %s and %s", mtype, getenv(var1), getenv(var2));
+ g_assert_not_reached();
+}
+
+char *resolve_machine_version(const char *alias, const char *var1,
+ const char *var2)
+{
+ const char *mname = g_getenv("QTEST_QEMU_MACHINE_TYPE");
+ g_autofree char *machine_name = NULL;
+
+ if (mname) {
+ const char *dash = strrchr(mname, '-');
+ const char *dot = strrchr(mname, '.');
+
+ machine_name = g_strdup(mname);
+
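+ /* A dash and a dot (e.g. pc-q35-8.1) mean an already versioned type. */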
+ if (dash && dot) {
+ assert(qtest_has_machine(machine_name));
+ return g_steal_pointer(&machine_name);
+ }
+ /* else: probably an alias, let it be resolved below */
+ } else {
+ /* use the hardcoded alias */
+ machine_name = g_strdup(alias);
+ }
+
+ return find_common_machine_version(machine_name, var1, var2);
+}
diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h
index 4f51d0f8bc..e31dc85cc7 100644
--- a/tests/qtest/migration-helpers.h
+++ b/tests/qtest/migration-helpers.h
@@ -43,4 +43,8 @@ void wait_for_migration_complete(QTestState *who);
void wait_for_migration_fail(QTestState *from, bool allow_active);
+char *find_common_machine_version(const char *mtype, const char *var1,
+ const char *var2);
+char *resolve_machine_version(const char *alias, const char *var1,
+ const char *var2);
#endif /* MIGRATION_HELPERS_H */
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index e1c110537b..bc70a14642 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -71,6 +71,8 @@ static bool got_dst_resume;
#define QEMU_VM_FILE_MAGIC 0x5145564d
#define FILE_TEST_FILENAME "migfile"
#define FILE_TEST_OFFSET 0x1000
+#define QEMU_ENV_SRC "QTEST_QEMU_BINARY_SRC"
+#define QEMU_ENV_DST "QTEST_QEMU_BINARY_DST"
#if defined(__linux__)
#include <sys/syscall.h>
@@ -743,6 +745,8 @@ static int test_migrate_start(QTestState **from, QTestState **to,
const char *kvm_opts = NULL;
const char *arch = qtest_get_arch();
const char *memory_size;
+ const char *machine_alias, *machine_opts = "";
+ g_autofree char *machine = NULL;
if (args->use_shmem) {
if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) {
@@ -755,11 +759,20 @@ static int test_migrate_start(QTestState **from, QTestState **to,
got_dst_resume = false;
if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
memory_size = "150M";
- arch_opts = g_strdup_printf("-drive file=%s,format=raw", bootpath);
+
+ if (g_str_equal(arch, "i386")) {
+ machine_alias = "pc";
+ } else {
+ machine_alias = "q35";
+ }
+ arch_opts = g_strdup_printf(
+ "-drive if=none,id=d0,file=%s,format=raw "
+ "-device ide-hd,drive=d0,secs=1,cyls=1,heads=1", bootpath);
start_address = X86_TEST_MEM_START;
end_address = X86_TEST_MEM_END;
} else if (g_str_equal(arch, "s390x")) {
memory_size = "128M";
+ machine_alias = "s390-ccw-virtio";
arch_opts = g_strdup_printf("-bios %s", bootpath);
start_address = S390_TEST_MEM_START;
end_address = S390_TEST_MEM_END;
@@ -771,11 +784,14 @@ static int test_migrate_start(QTestState **from, QTestState **to,
"'nvramrc=hex .\" _\" begin %x %x "
"do i c@ 1 + i c! 1000 +loop .\" B\" 0 "
"until'", end_address, start_address);
- arch_opts = g_strdup("-nodefaults -machine vsmt=8");
+ machine_alias = "pseries";
+ machine_opts = "vsmt=8";
+ arch_opts = g_strdup("-nodefaults");
} else if (strcmp(arch, "aarch64") == 0) {
memory_size = "150M";
- arch_opts = g_strdup_printf("-machine virt,gic-version=max -cpu max "
- "-kernel %s", bootpath);
+ machine_alias = "virt";
+ machine_opts = "gic-version=max";
+ arch_opts = g_strdup_printf("-cpu max -kernel %s", bootpath);
start_address = ARM_TEST_MEM_START;
end_address = ARM_TEST_MEM_END;
} else {
@@ -809,12 +825,19 @@ static int test_migrate_start(QTestState **from, QTestState **to,
kvm_opts = ",dirty-ring-size=4096";
}
+ machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC,
+ QEMU_ENV_DST);
+
+ g_test_message("Using machine type: %s", machine);
+
cmd_source = g_strdup_printf("-accel kvm%s -accel tcg "
+ "-machine %s,%s "
"-name source,debug-threads=on "
"-m %s "
"-serial file:%s/src_serial "
"%s %s %s %s %s",
kvm_opts ? kvm_opts : "",
+ machine, machine_opts,
memory_size, tmpfs,
arch_opts ? arch_opts : "",
arch_source ? arch_source : "",
@@ -822,26 +845,28 @@ static int test_migrate_start(QTestState **from, QTestState **to,
args->opts_source ? args->opts_source : "",
ignore_stderr);
if (!args->only_target) {
- *from = qtest_init(cmd_source);
+ *from = qtest_init_with_env(QEMU_ENV_SRC, cmd_source);
qtest_qmp_set_event_callback(*from,
migrate_watch_for_stop,
&got_src_stop);
}
cmd_target = g_strdup_printf("-accel kvm%s -accel tcg "
+ "-machine %s,%s "
"-name target,debug-threads=on "
"-m %s "
"-serial file:%s/dest_serial "
"-incoming %s "
"%s %s %s %s %s",
kvm_opts ? kvm_opts : "",
+ machine, machine_opts,
memory_size, tmpfs, uri,
arch_opts ? arch_opts : "",
arch_target ? arch_target : "",
shmem_opts ? shmem_opts : "",
args->opts_target ? args->opts_target : "",
ignore_stderr);
- *to = qtest_init(cmd_target);
+ *to = qtest_init_with_env(QEMU_ENV_DST, cmd_target);
qtest_qmp_set_event_callback(*to,
migrate_watch_for_resume,
&got_dst_resume);
@@ -2972,10 +2997,23 @@ int main(int argc, char **argv)
bool has_uffd;
const char *arch;
g_autoptr(GError) err = NULL;
+ const char *qemu_src = getenv(QEMU_ENV_SRC);
+ const char *qemu_dst = getenv(QEMU_ENV_DST);
int ret;
g_test_init(&argc, &argv, NULL);
+ /*
+ * The default QTEST_QEMU_BINARY must always be provided because
+ * that is what helpers use to query the accel type and
+ * architecture.
+ */
+ if (qemu_src && qemu_dst) {
+ g_test_message("Only one of %s, %s is allowed",
+ QEMU_ENV_SRC, QEMU_ENV_DST);
+ exit(1);
+ }
+
has_kvm = qtest_has_accel("kvm");
has_tcg = qtest_has_accel("tcg");
@@ -3034,7 +3072,9 @@ int main(int argc, char **argv)
qtest_add_func("/migration/bad_dest", test_baddest);
#ifndef _WIN32
- qtest_add_func("/migration/analyze-script", test_analyze_script);
+ if (!g_str_equal(arch, "s390x")) {
+ qtest_add_func("/migration/analyze-script", test_analyze_script);
+ }
#endif
qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain);
qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle);
diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c
index 8048044d28..e751a72e36 100644
--- a/tests/qtest/npcm7xx_adc-test.c
+++ b/tests/qtest/npcm7xx_adc-test.c
@@ -90,7 +90,7 @@ typedef struct ADC {
uint64_t base_addr;
} ADC;
-ADC adc = {
+ADC adc_defs = {
.irq = 0,
.base_addr = 0xf000c000
};
@@ -367,12 +367,12 @@ int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
- add_test(init, &adc);
- add_test(convert_internal, &adc);
- add_test(convert_external, &adc);
- add_test(interrupt, &adc);
- add_test(reset, &adc);
- add_test(calibrate, &adc);
+ add_test(init, &adc_defs);
+ add_test(convert_internal, &adc_defs);
+ add_test(convert_external, &adc_defs);
+ add_test(interrupt, &adc_defs);
+ add_test(reset, &adc_defs);
+ add_test(calibrate, &adc_defs);
return g_test_run();
}
diff --git a/tests/qtest/rtl8139-test.c b/tests/qtest/rtl8139-test.c
index 4dc0a0d22e..eedf90f65a 100644
--- a/tests/qtest/rtl8139-test.c
+++ b/tests/qtest/rtl8139-test.c
@@ -22,7 +22,7 @@ static void nop(void)
#define CLK 33333333
static QPCIBus *pcibus;
-static QPCIDevice *dev;
+static QPCIDevice *pcidev;
static QPCIBar dev_bar;
static void save_fn(QPCIDevice *dev, int devfn, void *data)
@@ -46,7 +46,7 @@ static QPCIDevice *get_device(void)
#define PORT(name, len, val) \
static unsigned __attribute__((unused)) in_##name(void) \
{ \
- unsigned res = qpci_io_read##len(dev, dev_bar, (val)); \
+ unsigned res = qpci_io_read##len(pcidev, dev_bar, (val)); \
if (verbosity_level >= 2) { \
g_test_message("*%s -> %x", #name, res); \
} \
@@ -57,7 +57,7 @@ static void out_##name(unsigned v) \
if (verbosity_level >= 2) { \
g_test_message("%x -> *%s", v, #name); \
} \
- qpci_io_write##len(dev, dev_bar, (val), v); \
+ qpci_io_write##len(pcidev, dev_bar, (val), v); \
}
PORT(Timer, l, 0x48)
@@ -189,11 +189,11 @@ static void test_init(void)
{
uint64_t barsize;
- dev = get_device();
+ pcidev = get_device();
- dev_bar = qpci_iomap(dev, 0, &barsize);
+ dev_bar = qpci_iomap(pcidev, 0, &barsize);
- qpci_device_enable(dev);
+ qpci_device_enable(pcidev);
test_timer();
}
diff --git a/tests/qtest/virtio-scsi-test.c b/tests/qtest/virtio-scsi-test.c
index ceaa7f2415..db10d572d0 100644
--- a/tests/qtest/virtio-scsi-test.c
+++ b/tests/qtest/virtio-scsi-test.c
@@ -156,7 +156,7 @@ static QVirtioSCSIQueues *qvirtio_scsi_init(QVirtioDevice *dev)
return vs;
}
-static void hotplug(void *obj, void *data, QGuestAllocator *alloc)
+static void hotplug(void *obj, void *data, QGuestAllocator *t_alloc)
{
QTestState *qts = global_qtest;
diff --git a/tests/tcg/i386/test-avx.c b/tests/tcg/i386/test-avx.c
index c39c0e5bce..230e6d84b8 100644
--- a/tests/tcg/i386/test-avx.c
+++ b/tests/tcg/i386/test-avx.c
@@ -236,12 +236,15 @@ v4di val_i64[] = {
v4di deadbeef = {0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull,
0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull};
-v4di indexq = {0x000000000000001full, 0x000000000000008full,
- 0xffffffffffffffffull, 0xffffffffffffff5full};
-v4di indexd = {0x00000002000000efull, 0xfffffff500000010ull,
- 0x0000000afffffff0ull, 0x000000000000000eull};
+/* &gather_mem[0x10] is 512 bytes from the base; indices must be >=-64, <64
+ * to account for scaling by 8 */
+v4di indexq = {0x000000000000001full, 0x000000000000003dull,
+ 0xffffffffffffffffull, 0xffffffffffffffdfull};
+v4di indexd = {0x00000002ffffffcdull, 0xfffffff500000010ull,
+ 0x0000003afffffff0ull, 0x000000000000000eull};
v4di gather_mem[0x20];
+_Static_assert(sizeof(gather_mem) == 1024, "gather_mem must stay 1KiB");
void init_f16reg(v4di *r)
{
@@ -316,6 +319,8 @@ int main(int argc, char *argv[])
int i;
init_all(&initI);
+ init_intreg(&initI.ymm[0]);
+ init_intreg(&initI.ymm[9]);
init_intreg(&initI.ymm[10]);
init_intreg(&initI.ymm[11]);
init_intreg(&initI.ymm[12]);
@@ -324,6 +329,8 @@ int main(int argc, char *argv[])
dump_regs(&initI);
init_all(&initF16);
+ init_f16reg(&initF16.ymm[0]);
+ init_f16reg(&initF16.ymm[9]);
init_f16reg(&initF16.ymm[10]);
init_f16reg(&initF16.ymm[11]);
init_f16reg(&initF16.ymm[12]);
@@ -333,6 +340,8 @@ int main(int argc, char *argv[])
dump_regs(&initF16);
init_all(&initF32);
+ init_f32reg(&initF32.ymm[0]);
+ init_f32reg(&initF32.ymm[9]);
init_f32reg(&initF32.ymm[10]);
init_f32reg(&initF32.ymm[11]);
init_f32reg(&initF32.ymm[12]);
@@ -342,6 +351,8 @@ int main(int argc, char *argv[])
dump_regs(&initF32);
init_all(&initF64);
+ init_f64reg(&initF64.ymm[0]);
+ init_f64reg(&initF64.ymm[9]);
init_f64reg(&initF64.ymm[10]);
init_f64reg(&initF64.ymm[11]);
init_f64reg(&initF64.ymm[12]);
diff --git a/tests/tcg/i386/test-avx.py b/tests/tcg/i386/test-avx.py
index 641a2ef69e..6063fb2d11 100755
--- a/tests/tcg/i386/test-avx.py
+++ b/tests/tcg/i386/test-avx.py
@@ -9,7 +9,7 @@ from fnmatch import fnmatch
archs = [
"SSE", "SSE2", "SSE3", "SSSE3", "SSE4_1", "SSE4_2",
"AES", "AVX", "AVX2", "AES+AVX", "VAES+AVX",
- "F16C", "FMA",
+ "F16C", "FMA", "SHA",
]
ignore = set(["FISTTP",
@@ -43,6 +43,7 @@ imask = {
'vPS[LR][AL][WDQ]': 0x3f,
'vPS[RL]LDQ': 0x1f,
'vROUND[PS][SD]': 0x7,
+ 'SHA1RNDS4': 0x03,
'vSHUFPD': 0x0f,
'vSHUFPS': 0xff,
'vAESKEYGENASSIST': 0xff,
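
The 0x03 mask reflects that SHA1RNDS4 defines only immediates 0..3, selecting the round-constant group. A minimal SHA-NI sketch using the matching intrinsic; build with -msha on hardware with the extension (the input values are arbitrary):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void)
    {
        __m128i abcd = _mm_set_epi32(1, 2, 3, 4);
        __m128i msg  = _mm_set_epi32(5, 6, 7, 8);

        /* The immediate picks rounds 0-19/20-39/40-59/60-79: only two
         * bits are meaningful, hence the 0x03 mask in test-avx.py. */
        __m128i r = _mm_sha1rnds4_epu32(abcd, msg, 2);

        printf("%016llx\n", (unsigned long long)_mm_cvtsi128_si64(r));
        return 0;
    }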
diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c
index 71ed31a4db..337b6e4ea7 100644
--- a/tests/unit/test-aio.c
+++ b/tests/unit/test-aio.c
@@ -127,10 +127,10 @@ static void *test_acquire_thread(void *opaque)
return NULL;
}
-static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
+static void set_event_notifier(AioContext *nctx, EventNotifier *notifier,
EventNotifierHandler *handler)
{
- aio_set_event_notifier(ctx, notifier, handler, NULL, NULL);
+ aio_set_event_notifier(nctx, notifier, handler, NULL, NULL);
}
static void dummy_notifier_read(EventNotifier *n)
diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c
index a2563647e7..49d4d9b251 100644
--- a/tests/unit/test-coroutine.c
+++ b/tests/unit/test-coroutine.c
@@ -195,7 +195,7 @@ static void test_no_dangling_access(void)
}
static bool locked;
-static int done;
+static int done_count;
static void coroutine_fn mutex_fn(void *opaque)
{
@@ -206,7 +206,7 @@ static void coroutine_fn mutex_fn(void *opaque)
qemu_coroutine_yield();
locked = false;
qemu_co_mutex_unlock(m);
- done++;
+ done_count++;
}
static void coroutine_fn lockable_fn(void *opaque)
@@ -218,7 +218,7 @@ static void coroutine_fn lockable_fn(void *opaque)
qemu_coroutine_yield();
locked = false;
qemu_lockable_unlock(x);
- done++;
+ done_count++;
}
static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
@@ -226,7 +226,7 @@ static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
Coroutine *c1 = qemu_coroutine_create(entry, opaque);
Coroutine *c2 = qemu_coroutine_create(entry, opaque);
- done = 0;
+ done_count = 0;
qemu_coroutine_enter(c1);
g_assert(locked);
qemu_coroutine_enter(c2);
@@ -235,11 +235,11 @@ static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
* terminates.
*/
qemu_coroutine_enter(c1);
- g_assert_cmpint(done, ==, 1);
+ g_assert_cmpint(done_count, ==, 1);
g_assert(locked);
qemu_coroutine_enter(c2);
- g_assert_cmpint(done, ==, 2);
+ g_assert_cmpint(done_count, ==, 2);
g_assert(!locked);
}
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
index ac35d65d19..2146cfacd3 100644
--- a/tests/unit/test-throttle.c
+++ b/tests/unit/test-throttle.c
@@ -618,7 +618,6 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
{ THROTTLE_OPS_TOTAL,
THROTTLE_OPS_READ,
THROTTLE_OPS_WRITE, } };
- ThrottleConfig cfg;
BucketType index;
int i;
diff --git a/tests/vm/freebsd b/tests/vm/freebsd
index ac51376c82..b581bd17fb 100755
--- a/tests/vm/freebsd
+++ b/tests/vm/freebsd
@@ -38,8 +38,9 @@ class FreeBSDVM(basevm.BaseVM):
cd $(mktemp -d /home/qemu/qemu-test.XXXXXX);
mkdir src build; cd src;
tar -xf /dev/vtbd1;
- cd ../build
- ../src/configure --python=python3.9 {configure_opts};
+ cd ../build;
+ ../src/configure --python=python3.9 --extra-ldflags=-L/usr/local/lib \
+ --extra-cflags=-I/usr/local/include {configure_opts};
gmake --output-sync -j{jobs} {target} {verbose};
"""
diff --git a/ui/input-legacy.c b/ui/input-legacy.c
index 46ea74e44d..210ae5eaca 100644
--- a/ui/input-legacy.c
+++ b/ui/input-legacy.c
@@ -127,7 +127,7 @@ static void legacy_kbd_event(DeviceState *dev, QemuConsole *src,
}
}
-static QemuInputHandler legacy_kbd_handler = {
+static const QemuInputHandler legacy_kbd_handler = {
.name = "legacy-kbd",
.mask = INPUT_EVENT_MASK_KEY,
.event = legacy_kbd_event,
diff --git a/ui/input.c b/ui/input.c
index cbe8573c5c..dc745860f4 100644
--- a/ui/input.c
+++ b/ui/input.c
@@ -10,7 +10,7 @@
struct QemuInputHandlerState {
DeviceState *dev;
- QemuInputHandler *handler;
+ const QemuInputHandler *handler;
int id;
int events;
QemuConsole *con;
@@ -46,7 +46,7 @@ static uint32_t queue_count;
static uint32_t queue_limit = 1024;
QemuInputHandlerState *qemu_input_handler_register(DeviceState *dev,
- QemuInputHandler *handler)
+ const QemuInputHandler *handler)
{
QemuInputHandlerState *s = g_new0(QemuInputHandlerState, 1);
static int id = 1;
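
With the parameter constified, handler tables can live in read-only storage, as the legacy-kbd and vdagent hunks in this series show. A registration sketch against the declarations above (my_kbd_event and the init hook are hypothetical):

    #include "qemu/osdep.h"
    #include "ui/input.h"

    static void my_kbd_event(DeviceState *dev, QemuConsole *src,
                             InputEvent *evt)
    {
        /* consume the key event */
    }

    static const QemuInputHandler my_kbd_handler = {
        .name  = "my-kbd",
        .mask  = INPUT_EVENT_MASK_KEY,
        .event = my_kbd_event,
    };

    static void my_device_init(DeviceState *dev)
    {
        /* The handler state keeps only a const pointer to the table. */
        qemu_input_handler_register(dev, &my_kbd_handler);
    }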
diff --git a/ui/spice-core.c b/ui/spice-core.c
index 52a59386d7..db21db2c94 100644
--- a/ui/spice-core.c
+++ b/ui/spice-core.c
@@ -821,8 +821,7 @@ static void qemu_spice_init(void)
};
using_spice = 1;
- migration_state.notify = migration_state_notifier;
- add_migration_state_change_notifier(&migration_state);
+ migration_add_notifier(&migration_state, migration_state_notifier);
spice_migrate.base.sif = &migrate_interface.base;
qemu_spice.add_interface(&spice_migrate.base);
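
migration_add_notifier() folds the old two-step registration (assign .notify, then add_migration_state_change_notifier) into one call. A minimal sketch, assuming the declaration lives alongside the old API in migration/misc.h; the callback body is hypothetical:

    #include "qemu/osdep.h"
    #include "migration/misc.h"

    static Notifier my_migration_state;

    static void my_migration_state_cb(Notifier *notifier, void *data)
    {
        /* inspect the migration state change carried via @data */
    }

    static void my_init(void)
    {
        /* Wires the callback and registers the notifier in one step. */
        migration_add_notifier(&my_migration_state, my_migration_state_cb);
    }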
diff --git a/ui/vdagent.c b/ui/vdagent.c
index 00d36a8677..64d7ab245a 100644
--- a/ui/vdagent.c
+++ b/ui/vdagent.c
@@ -297,7 +297,7 @@ static void vdagent_pointer_sync(DeviceState *dev)
}
}
-static QemuInputHandler vdagent_mouse_handler = {
+static const QemuInputHandler vdagent_mouse_handler = {
.name = "vdagent mouse",
.mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
.event = vdagent_pointer_event,
@@ -671,7 +671,7 @@ static void vdagent_chr_open(Chardev *chr,
return;
#endif
- if (migrate_add_blocker(vd->migration_blocker, errp) != 0) {
+ if (migrate_add_blocker(&vd->migration_blocker, errp) != 0) {
return;
}
@@ -924,13 +924,12 @@ static void vdagent_chr_fini(Object *obj)
{
VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(obj);
- migrate_del_blocker(vd->migration_blocker);
+ migrate_del_blocker(&vd->migration_blocker);
vdagent_disconnect(vd);
if (vd->mouse_hs) {
qemu_input_handler_unregister(vd->mouse_hs);
}
buffer_free(&vd->outbuf);
- error_free(vd->migration_blocker);
}
static const TypeInfo vdagent_chr_type_info = {
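
These hunks track the reworked blocker API: both calls now take Error ** and migrate_del_blocker() frees the blocker and NULLs the pointer, which is why the separate error_free() disappears above. A hedged sketch of the resulting pattern (MyDev and the message are illustrative):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "migration/blocker.h"

    typedef struct MyDev {
        Error *migration_blocker;
    } MyDev;

    static int my_dev_open(MyDev *d, Error **errp)
    {
        error_setg(&d->migration_blocker, "mydev does not support migration");
        /* On failure the blocker is freed and the pointer reset for us. */
        if (migrate_add_blocker(&d->migration_blocker, errp) != 0) {
            return -1;
        }
        return 0;
    }

    static void my_dev_close(MyDev *d)
    {
        /* Frees the Error and sets d->migration_blocker back to NULL. */
        migrate_del_blocker(&d->migration_blocker);
    }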
diff --git a/util/cutils.c b/util/cutils.c
index 64f817b477..42364039a5 100644
--- a/util/cutils.c
+++ b/util/cutils.c
@@ -1163,17 +1163,21 @@ char *get_relocated_path(const char *dir)
g_string_append(result, "/qemu-bundle");
if (access(result->str, R_OK) == 0) {
#ifdef G_OS_WIN32
- size_t size = mbsrtowcs(NULL, &dir, 0, &(mbstate_t){0}) + 1;
+ const char *src = dir;
+ size_t size = mbsrtowcs(NULL, &src, 0, &(mbstate_t){0}) + 1;
PWSTR wdir = g_new(WCHAR, size);
- mbsrtowcs(wdir, &dir, size, &(mbstate_t){0});
+ mbsrtowcs(wdir, &src, size, &(mbstate_t){0});
PCWSTR wdir_skipped_root;
- PathCchSkipRoot(wdir, &wdir_skipped_root);
+ if (PathCchSkipRoot(wdir, &wdir_skipped_root) == S_OK) {
+ size = wcsrtombs(NULL, &wdir_skipped_root, 0, &(mbstate_t){0});
+ char *cursor = result->str + result->len;
+ g_string_set_size(result, result->len + size);
+ wcsrtombs(cursor, &wdir_skipped_root, size + 1, &(mbstate_t){0});
+ } else {
+ g_string_append(result, dir);
+ }
- size = wcsrtombs(NULL, &wdir_skipped_root, 0, &(mbstate_t){0});
- char *cursor = result->str + result->len;
- g_string_set_size(result, result->len + size);
- wcsrtombs(cursor, &wdir_skipped_root, size + 1, &(mbstate_t){0});
g_free(wdir);
#else
g_string_append(result, dir);