-rw-r--r--  .gitlab-ci.d/buildtest.yml  2
-rw-r--r--  MAINTAINERS  8
-rw-r--r--  accel/tcg/cputlb.c  30
-rw-r--r--  accel/tcg/ldst_common.c.inc  8
-rw-r--r--  accel/tcg/user-exec.c  8
-rw-r--r--  bsd-user/arm/signal.c  196
-rw-r--r--  bsd-user/arm/target_arch.h  28
-rw-r--r--  bsd-user/arm/target_arch_cpu.c  39
-rw-r--r--  bsd-user/arm/target_arch_cpu.h  211
-rw-r--r--  bsd-user/arm/target_arch_elf.h  128
-rw-r--r--  bsd-user/arm/target_arch_reg.h  60
-rw-r--r--  bsd-user/arm/target_arch_signal.h  88
-rw-r--r--  bsd-user/arm/target_arch_sigtramp.h  49
-rw-r--r--  bsd-user/arm/target_arch_sysarch.h  6
-rw-r--r--  bsd-user/arm/target_arch_thread.h  82
-rw-r--r--  bsd-user/arm/target_arch_vmparam.h  48
-rw-r--r--  bsd-user/arm/target_syscall.h  27
-rw-r--r--  bsd-user/freebsd/target_os_signal.h  3
-rw-r--r--  bsd-user/freebsd/target_os_ucontext.h  44
-rw-r--r--  bsd-user/i386/signal.c  55
-rw-r--r--  bsd-user/i386/target_arch_signal.h  95
-rw-r--r--  bsd-user/mips/target_arch_sysarch.h  69
-rw-r--r--  bsd-user/mips/target_syscall.h  52
-rw-r--r--  bsd-user/mips64/target_arch_sysarch.h  69
-rw-r--r--  bsd-user/mips64/target_syscall.h  53
-rw-r--r--  bsd-user/x86_64/signal.c  55
-rw-r--r--  bsd-user/x86_64/target_arch_signal.h  103
-rw-r--r--  chardev/char-socket.c  17
-rw-r--r--  common-user/host/i386/safe-syscall.inc.S  1
-rw-r--r--  common-user/host/mips/safe-syscall.inc.S  1
-rw-r--r--  common-user/host/x86_64/safe-syscall.inc.S  1
-rw-r--r--  configs/targets/arm-bsd-user.mak  2
-rw-r--r--  cpu.c  20
-rw-r--r--  disas/riscv.c  5
-rw-r--r--  docs/about/deprecated.rst  6
-rw-r--r--  docs/about/removed-features.rst  7
-rw-r--r--  docs/interop/index.rst  1
-rw-r--r--  docs/interop/virtio-balloon-stats.rst (renamed from docs/virtio-balloon-stats.txt)  58
-rw-r--r--  docs/sphinx/fakedbusdoc.py  4
-rw-r--r--  docs/tools/qemu-trace-stap.rst  24
-rw-r--r--  hw/acpi/core.c  4
-rw-r--r--  hw/acpi/pcihp.c  12
-rw-r--r--  hw/arm/aspeed_ast2600.c  8
-rw-r--r--  hw/arm/npcm7xx_boards.c  27
-rw-r--r--  hw/arm/virt-acpi-build.c  1
-rw-r--r--  hw/arm/virt.c  11
-rw-r--r--  hw/block/vhost-user-blk.c  5
-rw-r--r--  hw/core/machine.c  3
-rw-r--r--  hw/display/macfb.c  2
-rw-r--r--  hw/dma/sifive_pdma.c  181
-rw-r--r--  hw/i386/acpi-build.c  9
-rw-r--r--  hw/i386/intel_iommu.c  38
-rw-r--r--  hw/i386/pc.c  39
-rw-r--r--  hw/i386/pc_piix.c  16
-rw-r--r--  hw/i386/pc_q35.c  15
-rw-r--r--  hw/intc/arm_gicv3_its.c  234
-rw-r--r--  hw/intc/gicv3_internal.h  40
-rw-r--r--  hw/intc/sifive_plic.c  254
-rw-r--r--  hw/m68k/q800.c  5
-rw-r--r--  hw/m68k/virt.c  9
-rw-r--r--  hw/pci/pci.c  9
-rw-r--r--  hw/pci/pci_host.c  6
-rw-r--r--  hw/pci/pcie_aer.c  4
-rw-r--r--  hw/pci/trace-events  8
-rw-r--r--  hw/ppc/spapr.c  15
-rw-r--r--  hw/riscv/microchip_pfsoc.c  2
-rw-r--r--  hw/riscv/opentitan.c  2
-rw-r--r--  hw/riscv/sifive_e.c  2
-rw-r--r--  hw/riscv/sifive_u.c  2
-rw-r--r--  hw/s390x/s390-virtio-ccw.c  14
-rw-r--r--  hw/scsi/vhost-scsi.c  15
-rw-r--r--  hw/sd/sd.c  52
-rw-r--r--  hw/smbios/smbios.c  8
-rw-r--r--  hw/virtio/vhost-backend.c  4
-rw-r--r--  hw/virtio/vhost-user.c  401
-rw-r--r--  hw/virtio/vhost-vdpa.c  37
-rw-r--r--  hw/virtio/vhost-vsock.c  11
-rw-r--r--  hw/virtio/vhost.c  98
-rw-r--r--  hw/virtio/virtio-mem.c  105
-rw-r--r--  hw/virtio/virtio.c  1
-rw-r--r--  include/disas/dis-asm.h  1
-rw-r--r--  include/exec/memop.h  15
-rw-r--r--  include/hw/arm/aspeed_soc.h  2
-rw-r--r--  include/hw/boards.h  3
-rw-r--r--  include/hw/core/cpu.h  3
-rw-r--r--  include/hw/firmware/smbios.h  10
-rw-r--r--  include/hw/i386/pc.h  7
-rw-r--r--  include/hw/intc/arm_gicv3_its_common.h  9
-rw-r--r--  include/hw/pci/pci.h  5
-rw-r--r--  include/hw/riscv/virt.h  2
-rw-r--r--  include/hw/virtio/virtio-mem.h  12
-rw-r--r--  include/qemu/int128.h  27
-rw-r--r--  include/qemu/osdep.h  7
-rw-r--r--  include/standard-headers/linux/virtio_mem.h  9
-rw-r--r--  include/sysemu/sysemu.h  1
-rw-r--r--  include/tcg/tcg-op.h  4
-rw-r--r--  linux-user/aarch64/target_prctl.h  160
-rw-r--r--  linux-user/aarch64/target_signal.h  18
-rw-r--r--  linux-user/aarch64/target_syscall.h  24
-rw-r--r--  linux-user/alpha/target_prctl.h  1
-rw-r--r--  linux-user/alpha/target_signal.h  1
-rw-r--r--  linux-user/alpha/target_syscall.h  1
-rw-r--r--  linux-user/arm/target_prctl.h  1
-rw-r--r--  linux-user/arm/target_signal.h  18
-rw-r--r--  linux-user/arm/target_syscall.h  1
-rw-r--r--  linux-user/cris/target_prctl.h  1
-rw-r--r--  linux-user/cris/target_signal.h  18
-rw-r--r--  linux-user/cris/target_syscall.h  1
-rw-r--r--  linux-user/elfload.c  66
-rw-r--r--  linux-user/fd-trans.c  184
-rw-r--r--  linux-user/generic/signal.h  15
-rw-r--r--  linux-user/generic/target_prctl_unalign.h  27
-rw-r--r--  linux-user/hexagon/target_prctl.h  1
-rw-r--r--  linux-user/hexagon/target_signal.h  11
-rw-r--r--  linux-user/host/loongarch64/host-signal.h  4
-rw-r--r--  linux-user/hppa/target_prctl.h  1
-rw-r--r--  linux-user/hppa/target_signal.h  1
-rw-r--r--  linux-user/hppa/target_syscall.h  1
-rw-r--r--  linux-user/i386/target_prctl.h  1
-rw-r--r--  linux-user/i386/target_signal.h  18
-rw-r--r--  linux-user/i386/target_syscall.h  1
-rw-r--r--  linux-user/m68k/target_prctl.h  1
-rw-r--r--  linux-user/m68k/target_signal.h  18
-rw-r--r--  linux-user/m68k/target_syscall.h  1
-rw-r--r--  linux-user/microblaze/target_prctl.h  1
-rw-r--r--  linux-user/microblaze/target_signal.h  18
-rw-r--r--  linux-user/microblaze/target_syscall.h  1
-rw-r--r--  linux-user/mips/target_prctl.h  88
-rw-r--r--  linux-user/mips/target_signal.h  1
-rw-r--r--  linux-user/mips/target_syscall.h  7
-rw-r--r--  linux-user/mips64/target_prctl.h  1
-rw-r--r--  linux-user/mips64/target_signal.h  1
-rw-r--r--  linux-user/mips64/target_syscall.h  7
-rw-r--r--  linux-user/nios2/cpu_loop.c  93
-rw-r--r--  linux-user/nios2/signal.c  58
-rw-r--r--  linux-user/nios2/target_prctl.h  1
-rw-r--r--  linux-user/nios2/target_signal.h  16
-rw-r--r--  linux-user/nios2/target_syscall.h  1
-rw-r--r--  linux-user/openrisc/target_prctl.h  1
-rw-r--r--  linux-user/openrisc/target_signal.h  23
-rw-r--r--  linux-user/openrisc/target_syscall.h  1
-rw-r--r--  linux-user/ppc/target_prctl.h  1
-rw-r--r--  linux-user/ppc/target_signal.h  18
-rw-r--r--  linux-user/ppc/target_syscall.h  1
-rw-r--r--  linux-user/riscv/target_prctl.h  1
-rw-r--r--  linux-user/riscv/target_signal.h  12
-rw-r--r--  linux-user/riscv/target_syscall.h  1
-rw-r--r--  linux-user/s390x/target_prctl.h  1
-rw-r--r--  linux-user/s390x/target_signal.h  15
-rw-r--r--  linux-user/s390x/target_syscall.h  1
-rw-r--r--  linux-user/sh4/target_prctl.h  1
-rw-r--r--  linux-user/sh4/target_signal.h  18
-rw-r--r--  linux-user/sh4/target_syscall.h  1
-rw-r--r--  linux-user/signal.c  9
-rw-r--r--  linux-user/sparc/target_prctl.h  1
-rw-r--r--  linux-user/sparc/target_signal.h  1
-rw-r--r--  linux-user/sparc/target_syscall.h  1
-rw-r--r--  linux-user/syscall.c  657
-rw-r--r--  linux-user/syscall_defs.h  46
-rw-r--r--  linux-user/user-internals.h  2
-rw-r--r--  linux-user/x86_64/target_prctl.h  1
-rw-r--r--  linux-user/x86_64/target_signal.h  18
-rw-r--r--  linux-user/x86_64/target_syscall.h  1
-rw-r--r--  linux-user/xtensa/target_prctl.h  1
-rw-r--r--  linux-user/xtensa/target_signal.h  17
-rw-r--r--  meson.build  2
-rw-r--r--  pc-bios/opensbi-riscv32-generic-fw_dynamic.bin  bin 78680 -> 108504 bytes
-rw-r--r--  pc-bios/opensbi-riscv32-generic-fw_dynamic.elf  bin 727464 -> 838904 bytes
-rw-r--r--  pc-bios/opensbi-riscv64-generic-fw_dynamic.bin  bin 75096 -> 105296 bytes
-rw-r--r--  pc-bios/opensbi-riscv64-generic-fw_dynamic.elf  bin 781264 -> 934696 bytes
-rw-r--r--  python/qemu/aqmp/aqmp_tui.py  3
-rw-r--r--  python/qemu/aqmp/protocol.py  5
-rw-r--r--  python/qemu/qmp/qom_common.py  6
-rw-r--r--  qapi/machine.json  12
-rw-r--r--  qemu-options.hx  8
m---------  roms/opensbi  0
-rwxr-xr-x  scripts/qemu-binfmt-conf.sh  4
-rw-r--r--  scripts/simplebench/bench-example.py  2
-rw-r--r--  softmmu/cpus.c  4
-rw-r--r--  softmmu/vl.c  8
-rw-r--r--  target/alpha/cpu.h  5
-rw-r--r--  target/alpha/translate.c  55
-rw-r--r--  target/arm/helper-a64.c  8
-rw-r--r--  target/arm/helper.c  32
-rw-r--r--  target/arm/translate-a32.h  4
-rw-r--r--  target/arm/translate-a64.c  8
-rw-r--r--  target/arm/translate-neon.c  6
-rw-r--r--  target/arm/translate-sve.c  10
-rw-r--r--  target/arm/translate-vfp.c  8
-rw-r--r--  target/arm/translate.c  2
-rw-r--r--  target/cris/translate.c  2
-rw-r--r--  target/hppa/cpu.h  5
-rw-r--r--  target/hppa/translate.c  23
-rw-r--r--  target/i386/tcg/mem_helper.c  2
-rw-r--r--  target/i386/tcg/translate.c  36
-rw-r--r--  target/m68k/op_helper.c  7
-rw-r--r--  target/mips/tcg/micromips_translate.c.inc  10
-rw-r--r--  target/mips/tcg/translate.c  58
-rw-r--r--  target/mips/tcg/tx79_translate.c  8
-rw-r--r--  target/nios2/cpu.h  2
-rw-r--r--  target/nios2/translate.c  26
-rw-r--r--  target/ppc/translate.c  32
-rw-r--r--  target/ppc/translate/fixedpoint-impl.c.inc  22
-rw-r--r--  target/ppc/translate/fp-impl.c.inc  4
-rw-r--r--  target/ppc/translate/vsx-impl.c.inc  42
-rw-r--r--  target/riscv/cpu.c  34
-rw-r--r--  target/riscv/cpu.h  24
-rw-r--r--  target/riscv/cpu_bits.h  3
-rw-r--r--  target/riscv/cpu_helper.c  24
-rw-r--r--  target/riscv/csr.c  194
-rw-r--r--  target/riscv/gdbstub.c  5
-rw-r--r--  target/riscv/helper.h  9
-rw-r--r--  target/riscv/insn16.decode  27
-rw-r--r--  target/riscv/insn32.decode  25
-rw-r--r--  target/riscv/insn_trans/trans_rva.c.inc  22
-rw-r--r--  target/riscv/insn_trans/trans_rvb.c.inc  48
-rw-r--r--  target/riscv/insn_trans/trans_rvd.c.inc  4
-rw-r--r--  target/riscv/insn_trans/trans_rvh.c.inc  4
-rw-r--r--  target/riscv/insn_trans/trans_rvi.c.inc  716
-rw-r--r--  target/riscv/insn_trans/trans_rvm.c.inc  192
-rw-r--r--  target/riscv/insn_trans/trans_rvv.c.inc  78
-rw-r--r--  target/riscv/m128_helper.c  109
-rw-r--r--  target/riscv/machine.c  22
-rw-r--r--  target/riscv/meson.build  1
-rw-r--r--  target/riscv/op_helper.c  47
-rw-r--r--  target/riscv/translate.c  257
-rw-r--r--  target/s390x/tcg/insn-data.def  28
-rw-r--r--  target/s390x/tcg/mem_helper.c  8
-rw-r--r--  target/s390x/tcg/translate.c  8
-rw-r--r--  target/s390x/tcg/translate_vx.c.inc  18
-rw-r--r--  target/sh4/cpu.h  4
-rw-r--r--  target/sh4/translate.c  62
-rw-r--r--  target/sparc/translate.c  36
-rw-r--r--  target/tricore/translate.c  4
-rw-r--r--  target/xtensa/translate.c  4
-rw-r--r--  tcg/aarch64/tcg-target.c.inc  2
-rw-r--r--  tcg/arm/tcg-target.c.inc  10
-rw-r--r--  tcg/i386/tcg-target.c.inc  12
-rw-r--r--  tcg/mips/tcg-target.c.inc  12
-rw-r--r--  tcg/optimize.c  49
-rw-r--r--  tcg/ppc/tcg-target.c.inc  16
-rw-r--r--  tcg/riscv/tcg-target.c.inc  6
-rw-r--r--  tcg/s390x/tcg-target.c.inc  18
-rw-r--r--  tcg/sparc/tcg-target.c.inc  16
-rw-r--r--  tcg/tcg.c  4
-rw-r--r--  tcg/tci.c  16
-rw-r--r--  tests/data/acpi/q35/DSDT.tis.tpm12  bin 8894 -> 8900 bytes
-rw-r--r--  tests/data/acpi/q35/DSDT.tis.tpm2  bin 8894 -> 8921 bytes
-rw-r--r--  tests/data/acpi/q35/FACP.slic  bin 0 -> 244 bytes
-rw-r--r--  tests/data/acpi/q35/SLIC.slic  bin 0 -> 36 bytes
-rw-r--r--  tests/qtest/bios-tables-test.c  15
-rw-r--r--  tests/qtest/hd-geo-test.c  8
-rw-r--r--  tests/qtest/test-x86-cpuid-compat.c  85
-rw-r--r--  tests/unit/test-util-sockets.c  6
-rw-r--r--  util/int128.c  147
-rw-r--r--  util/meson.build  1
-rw-r--r--  util/oslib-posix.c  232
257 files changed, 5904 insertions, 2569 deletions
diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml
index 7e1cb0b3c2..12fb1130fe 100644
--- a/.gitlab-ci.d/buildtest.yml
+++ b/.gitlab-ci.d/buildtest.yml
@@ -164,7 +164,7 @@ build-system-centos:
variables:
IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
- --enable-modules --enable-trace-backends=dtrace
+ --enable-modules --enable-trace-backends=dtrace --enable-docs
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
diff --git a/MAINTAINERS b/MAINTAINERS
index f871d759fd..c98a61caee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1777,6 +1777,13 @@ F: docs/specs/acpi_mem_hotplug.rst
F: docs/specs/acpi_pci_hotplug.rst
F: docs/specs/acpi_hw_reduced_hotplug.rst
+ACPI/VIOT
+M: Jean-Philippe Brucker <jean-philippe@linaro.org>
+R: Ani Sinha <ani@anisinha.ca>
+S: Supported
+F: hw/acpi/viot.c
+F: hw/acpi/viot.h
+
ACPI/HEST/GHES
R: Dongjiu Geng <gengdongjiu1@gmail.com>
L: qemu-arm@nongnu.org
@@ -1925,6 +1932,7 @@ virtio-balloon
M: Michael S. Tsirkin <mst@redhat.com>
M: David Hildenbrand <david@redhat.com>
S: Maintained
+F: docs/interop/virtio-balloon-stats.rst
F: hw/virtio/virtio-balloon*.c
F: include/hw/virtio/virtio-balloon.h
F: softmmu/balloon.c
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b69a953447..5e0d0eebc3 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1885,9 +1885,9 @@ load_memop(const void *haddr, MemOp op)
return (uint32_t)ldl_be_p(haddr);
case MO_LEUL:
return (uint32_t)ldl_le_p(haddr);
- case MO_BEQ:
+ case MO_BEUQ:
return ldq_be_p(haddr);
- case MO_LEQ:
+ case MO_LEUQ:
return ldq_le_p(haddr);
default:
qemu_build_not_reached();
@@ -2081,16 +2081,16 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEQ);
- return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
+ validate_memop(oi, MO_LEUQ);
+ return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEQ);
- return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
+ validate_memop(oi, MO_BEUQ);
+ return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
helper_be_ldq_mmu);
}
@@ -2166,7 +2166,7 @@ uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu);
+ return cpu_load_helper(env, addr, oi, MO_BEUQ, helper_be_ldq_mmu);
}
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
@@ -2210,10 +2210,10 @@ store_memop(void *haddr, uint64_t val, MemOp op)
case MO_LEUL:
stl_le_p(haddr, val);
break;
- case MO_BEQ:
+ case MO_BEUQ:
stq_be_p(haddr, val);
break;
- case MO_LEQ:
+ case MO_LEUQ:
stq_le_p(haddr, val);
break;
default:
@@ -2465,15 +2465,15 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEQ);
- store_helper(env, addr, val, oi, retaddr, MO_LEQ);
+ validate_memop(oi, MO_LEUQ);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEQ);
- store_helper(env, addr, val, oi, retaddr, MO_BEQ);
+ validate_memop(oi, MO_BEUQ);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}
/*
@@ -2609,11 +2609,11 @@ uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
+ return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
- MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}
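
The hunks above rename the 64-bit MemOp constants MO_BEQ/MO_LEQ/MO_TEQ to MO_BEUQ/MO_LEUQ/MO_TEUQ so the unsigned quad width is spelled out the same way it already is for the narrower sizes (MO_UB, MO_UW, MO_UL). A minimal caller sketch after the rename, using only helpers that appear in this file (illustration only, not part of the change):

    /* 64-bit little-endian unsigned load with the renamed constant */
    MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, cpu_mmu_index(env, false));
    uint64_t val = cpu_ldq_le_mmu(env, addr, oi, retaddr);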
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
index bfefb275e7..6ac8d871a3 100644
--- a/accel/tcg/ldst_common.c.inc
+++ b/accel/tcg/ldst_common.c.inc
@@ -45,7 +45,7 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_be_mmu(env, addr, oi, ra);
}
@@ -72,7 +72,7 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_le_mmu(env, addr, oi, ra);
}
@@ -100,7 +100,7 @@ void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
cpu_stq_be_mmu(env, addr, val, oi, ra);
}
@@ -121,7 +121,7 @@ void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
cpu_stq_le_mmu(env, addr, val, oi, ra);
}
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 1528a21fad..6f5d4933f0 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -294,7 +294,7 @@ uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint64_t ret;
- validate_memop(oi, MO_BEQ);
+ validate_memop(oi, MO_BEUQ);
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_be_p(haddr);
@@ -339,7 +339,7 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint64_t ret;
- validate_memop(oi, MO_LEQ);
+ validate_memop(oi, MO_LEUQ);
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_le_p(haddr);
@@ -392,7 +392,7 @@ void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
{
void *haddr;
- validate_memop(oi, MO_BEQ);
+ validate_memop(oi, MO_BEUQ);
trace_guest_st_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_be_p(haddr, val);
@@ -431,7 +431,7 @@ void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
{
void *haddr;
- validate_memop(oi, MO_LEQ);
+ validate_memop(oi, MO_LEUQ);
trace_guest_st_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_le_p(haddr, val);
diff --git a/bsd-user/arm/signal.c b/bsd-user/arm/signal.c
new file mode 100644
index 0000000000..1478f008d1
--- /dev/null
+++ b/bsd-user/arm/signal.c
@@ -0,0 +1,196 @@
+/*
+ * arm signal functions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu.h"
+
+/*
+ * Compare to arm/arm/machdep.c sendsig()
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUARMState *env, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /*
+ * Arguments to signal handler:
+ * r0 = signal number
+ * r1 = siginfo pointer
+ * r2 = ucontext pointer
+ * r5 = ucontext pointer
+ * pc = signal handler pointer
+ * sp = sigframe struct pointer
+ * lr = sigtramp at base of user stack
+ */
+
+ env->regs[0] = sig;
+ env->regs[1] = frame_addr +
+ offsetof(struct target_sigframe, sf_si);
+ env->regs[2] = frame_addr +
+ offsetof(struct target_sigframe, sf_uc);
+
+ /* the trampoline uses r5 as the uc address */
+ env->regs[5] = frame_addr +
+ offsetof(struct target_sigframe, sf_uc);
+ env->regs[TARGET_REG_PC] = ka->_sa_handler & ~1;
+ env->regs[TARGET_REG_SP] = frame_addr;
+ env->regs[TARGET_REG_LR] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
+ /*
+ * Low bit indicates whether or not we're entering thumb mode.
+ */
+ cpsr_write(env, (ka->_sa_handler & 1) * CPSR_T, CPSR_T, CPSRWriteByInstr);
+
+ return 0;
+}
+
+/*
+ * Compare to arm/arm/machdep.c get_mcontext()
+ * Assumes that the memory is locked if mcp points to user memory.
+ */
+abi_long get_mcontext(CPUARMState *env, target_mcontext_t *mcp, int flags)
+{
+ int err = 0;
+ uint32_t *gr = mcp->__gregs;
+
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(target_mcontext_vfp_t)) {
+ return -TARGET_EINVAL;
+ }
+
+ gr[TARGET_REG_CPSR] = tswap32(cpsr_read(env));
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
+ gr[TARGET_REG_R0] = 0;
+ gr[TARGET_REG_CPSR] &= ~CPSR_C;
+ } else {
+ gr[TARGET_REG_R0] = tswap32(env->regs[0]);
+ }
+
+ gr[TARGET_REG_R1] = tswap32(env->regs[1]);
+ gr[TARGET_REG_R2] = tswap32(env->regs[2]);
+ gr[TARGET_REG_R3] = tswap32(env->regs[3]);
+ gr[TARGET_REG_R4] = tswap32(env->regs[4]);
+ gr[TARGET_REG_R5] = tswap32(env->regs[5]);
+ gr[TARGET_REG_R6] = tswap32(env->regs[6]);
+ gr[TARGET_REG_R7] = tswap32(env->regs[7]);
+ gr[TARGET_REG_R8] = tswap32(env->regs[8]);
+ gr[TARGET_REG_R9] = tswap32(env->regs[9]);
+ gr[TARGET_REG_R10] = tswap32(env->regs[10]);
+ gr[TARGET_REG_R11] = tswap32(env->regs[11]);
+ gr[TARGET_REG_R12] = tswap32(env->regs[12]);
+
+ gr[TARGET_REG_SP] = tswap32(env->regs[13]);
+ gr[TARGET_REG_LR] = tswap32(env->regs[14]);
+ gr[TARGET_REG_PC] = tswap32(env->regs[15]);
+
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr != 0) {
+ /* see get_vfpcontext in sys/arm/arm/exec_machdep.c */
+ target_mcontext_vfp_t *vfp;
+ vfp = lock_user(VERIFY_WRITE, mcp->mc_vfp_ptr, sizeof(*vfp), 0);
+ for (int i = 0; i < 32; i++) {
+ vfp->mcv_reg[i] = tswap64(*aa32_vfp_dreg(env, i));
+ }
+ vfp->mcv_fpscr = tswap32(vfp_get_fpscr(env));
+ unlock_user(vfp, mcp->mc_vfp_ptr, sizeof(*vfp));
+ }
+ return err;
+}
+
+/* Compare to arm/arm/exec_machdep.c set_mcontext() */
+abi_long set_mcontext(CPUARMState *env, target_mcontext_t *mcp, int srflag)
+{
+ int err = 0;
+ const uint32_t *gr = mcp->__gregs;
+ uint32_t cpsr, ccpsr = cpsr_read(env);
+ uint32_t fpscr, mask;
+
+ cpsr = tswap32(gr[TARGET_REG_CPSR]);
+ /*
+ * Only allow certain bits to change, reject attempted changes to non-user
+ * bits. In addition, make sure we're headed for user mode and none of the
+ * interrupt bits are set.
+ */
+ if ((ccpsr & ~CPSR_USER) != (cpsr & ~CPSR_USER)) {
+ return -TARGET_EINVAL;
+ }
+ if ((cpsr & CPSR_M) != ARM_CPU_MODE_USR ||
+ (cpsr & (CPSR_I | CPSR_F)) != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ /*
+ * The movs pc,lr instruction that implements the return to userland masks
+ * these bits out.
+ */
+ mask = cpsr & CPSR_T ? 0x1 : 0x3;
+
+ /*
+ * Make sure that we either have no vfp, or it's the correct size.
+ * FreeBSD just ignores it, though, so maybe we'll need to adjust
+ * things below instead.
+ */
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(target_mcontext_vfp_t)) {
+ return -TARGET_EINVAL;
+ }
+
+ env->regs[0] = tswap32(gr[TARGET_REG_R0]);
+ env->regs[1] = tswap32(gr[TARGET_REG_R1]);
+ env->regs[2] = tswap32(gr[TARGET_REG_R2]);
+ env->regs[3] = tswap32(gr[TARGET_REG_R3]);
+ env->regs[4] = tswap32(gr[TARGET_REG_R4]);
+ env->regs[5] = tswap32(gr[TARGET_REG_R5]);
+ env->regs[6] = tswap32(gr[TARGET_REG_R6]);
+ env->regs[7] = tswap32(gr[TARGET_REG_R7]);
+ env->regs[8] = tswap32(gr[TARGET_REG_R8]);
+ env->regs[9] = tswap32(gr[TARGET_REG_R9]);
+ env->regs[10] = tswap32(gr[TARGET_REG_R10]);
+ env->regs[11] = tswap32(gr[TARGET_REG_R11]);
+ env->regs[12] = tswap32(gr[TARGET_REG_R12]);
+
+ env->regs[13] = tswap32(gr[TARGET_REG_SP]);
+ env->regs[14] = tswap32(gr[TARGET_REG_LR]);
+ env->regs[15] = tswap32(gr[TARGET_REG_PC] & ~mask);
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr != 0) {
+ /* see set_vfpcontext in sys/arm/arm/exec_machdep.c */
+ target_mcontext_vfp_t *vfp;
+
+ vfp = lock_user(VERIFY_READ, mcp->mc_vfp_ptr, sizeof(*vfp), 1);
+ for (int i = 0; i < 32; i++) {
+ __get_user(*aa32_vfp_dreg(env, i), &vfp->mcv_reg[i]);
+ }
+ __get_user(fpscr, &vfp->mcv_fpscr);
+ vfp_set_fpscr(env, fpscr);
+ unlock_user(vfp, mcp->mc_vfp_ptr, sizeof(target_ucontext_t));
+
+ /*
+ * linux-user sets fpexc, fpinst and fpinst2, but these aren't in
+ * FreeBSD's mcontext, what to do?
+ */
+ }
+ cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
+
+ return err;
+}
+
+/* Compare to arm/arm/machdep.c sys_sigreturn() */
+abi_long get_ucontext_sigreturn(CPUARMState *env, abi_ulong target_sf,
+ abi_ulong *target_uc)
+{
+ *target_uc = target_sf;
+
+ return 0;
+}
diff --git a/bsd-user/arm/target_arch.h b/bsd-user/arm/target_arch.h
new file mode 100644
index 0000000000..93cfaea098
--- /dev/null
+++ b/bsd-user/arm/target_arch.h
@@ -0,0 +1,28 @@
+/*
+ * ARM 32-bit specific prototypes for bsd-user
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_H_
+#define _TARGET_ARCH_H_
+
+#include "qemu.h"
+
+void target_cpu_set_tls(CPUARMState *env, target_ulong newtls);
+target_ulong target_cpu_get_tls(CPUARMState *env);
+
+#endif /* !_TARGET_ARCH_H_ */
diff --git a/bsd-user/arm/target_arch_cpu.c b/bsd-user/arm/target_arch_cpu.c
new file mode 100644
index 0000000000..02bf9149d5
--- /dev/null
+++ b/bsd-user/arm/target_arch_cpu.c
@@ -0,0 +1,39 @@
+/*
+ * arm cpu related code
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "target_arch.h"
+
+void target_cpu_set_tls(CPUARMState *env, target_ulong newtls)
+{
+ if (access_secure_reg(env)) {
+ env->cp15.tpidrurw_s = newtls;
+ env->cp15.tpidruro_s = newtls;
+ return;
+ }
+
+ env->cp15.tpidr_el[0] = newtls;
+ env->cp15.tpidrro_el[0] = newtls;
+}
+
+target_ulong target_cpu_get_tls(CPUARMState *env)
+{
+ if (access_secure_reg(env)) {
+ return env->cp15.tpidruro_s;
+ }
+ return env->cp15.tpidrro_el[0];
+}
diff --git a/bsd-user/arm/target_arch_cpu.h b/bsd-user/arm/target_arch_cpu.h
new file mode 100644
index 0000000000..c675419c30
--- /dev/null
+++ b/bsd-user/arm/target_arch_cpu.h
@@ -0,0 +1,211 @@
+/*
+ * arm cpu init and loop
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_CPU_H_
+#define _TARGET_ARCH_CPU_H_
+
+#include "target_arch.h"
+
+#define TARGET_DEFAULT_CPU_MODEL "any"
+
+static inline void target_cpu_init(CPUARMState *env,
+ struct target_pt_regs *regs)
+{
+ int i;
+
+ cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
+ CPSRWriteByInstr);
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = regs->uregs[i];
+ }
+}
+
+static inline void target_cpu_loop(CPUARMState *env)
+{
+ int trapnr;
+ target_siginfo_t info;
+ unsigned int n;
+ CPUState *cs = env_cpu(env);
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+ switch (trapnr) {
+ case EXCP_UDEF:
+ {
+ /* See arm/arm/undefined.c undefinedinstruction(); */
+ info.si_addr = env->regs[15];
+
+ /* illegal instruction */
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPC;
+ queue_signal(env, info.si_signo, &info);
+
+ /* TODO: What about instruction emulation? */
+ }
+ break;
+ case EXCP_SWI:
+ case EXCP_BKPT:
+ {
+ /*
+ * system call
+ * See arm/arm/trap.c cpu_fetch_syscall_args()
+ */
+ if (trapnr == EXCP_BKPT) {
+ if (env->thumb) {
+ env->regs[15] += 2;
+ } else {
+ env->regs[15] += 4;
+ }
+ }
+ n = env->regs[7];
+ if (bsd_type == target_freebsd) {
+ int ret;
+ abi_ulong params = get_sp_from_cpustate(env);
+ int32_t syscall_nr = n;
+ int32_t arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8;
+
+ /* See arm/arm/trap.c cpu_fetch_syscall_args() */
+ if (syscall_nr == TARGET_FREEBSD_NR_syscall) {
+ syscall_nr = env->regs[0];
+ arg1 = env->regs[1];
+ arg2 = env->regs[2];
+ arg3 = env->regs[3];
+ get_user_s32(arg4, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg5, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg6, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg7, params);
+ arg8 = 0;
+ } else if (syscall_nr == TARGET_FREEBSD_NR___syscall) {
+ syscall_nr = env->regs[0];
+ arg1 = env->regs[2];
+ arg2 = env->regs[3];
+ get_user_s32(arg3, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg4, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg5, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg6, params);
+ arg7 = 0;
+ arg8 = 0;
+ } else {
+ arg1 = env->regs[0];
+ arg2 = env->regs[1];
+ arg3 = env->regs[2];
+ arg4 = env->regs[3];
+ get_user_s32(arg5, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg6, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg7, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg8, params);
+ }
+ ret = do_freebsd_syscall(env, syscall_nr, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7, arg8);
+ /*
+ * Compare to arm/arm/vm_machdep.c
+ * cpu_set_syscall_retval()
+ */
+ if (-TARGET_EJUSTRETURN == ret) {
+ /*
+ * Returning from a successful sigreturn syscall.
+ * Avoid clobbering register state.
+ */
+ break;
+ }
+ if (-TARGET_ERESTART == ret) {
+ env->regs[15] -= env->thumb ? 2 : 4;
+ break;
+ }
+ if ((unsigned int)ret >= (unsigned int)(-515)) {
+ ret = -ret;
+ cpsr_write(env, CPSR_C, CPSR_C, CPSRWriteByInstr);
+ env->regs[0] = ret;
+ } else {
+ cpsr_write(env, 0, CPSR_C, CPSRWriteByInstr);
+ env->regs[0] = ret; /* XXX need to handle lseek()? */
+ /* env->regs[1] = 0; */
+ }
+ } else {
+ fprintf(stderr, "qemu: bsd_type (= %d) syscall "
+ "not supported\n", bsd_type);
+ }
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_PREFETCH_ABORT:
+ /* See arm/arm/trap.c prefetch_abort_handler() */
+ case EXCP_DATA_ABORT:
+ /* See arm/arm/trap.c data_abort_handler() */
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = 0;
+ info.si_addr = env->exception.vaddress;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_DEBUG:
+ {
+
+ info.si_signo = TARGET_SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ info.si_addr = env->exception.vaddress;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_ATOMIC:
+ cpu_exec_step_atomic(cs);
+ break;
+ case EXCP_YIELD:
+ /* nothing to do here for user-mode, just resume guest code */
+ break;
+ default:
+ fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ cpu_dump_state(cs, stderr, 0);
+ abort();
+ } /* switch() */
+ process_pending_signals(env);
+ } /* for (;;) */
+}
+
+static inline void target_cpu_clone_regs(CPUARMState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->regs[13] = newsp;
+ }
+ env->regs[0] = 0;
+}
+
+static inline void target_cpu_reset(CPUArchState *cpu)
+{
+}
+
+#endif /* !_TARGET_ARCH_CPU_H_ */
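
The syscall dispatch in target_cpu_loop() mirrors FreeBSD's cpu_fetch_syscall_args(): the syscall number is taken from r7, a direct call passes arguments 1-4 in r0-r3 with the remainder on the stack, and the SYS_syscall / SYS___syscall indirections shift everything down accordingly. A worked example with hypothetical values (illustration only, not part of the change):

    /* write(fd, buf, len) called directly:  r7 = SYS_write,   r0 = fd, r1 = buf, r2 = len */
    /* the same call via SYS_syscall:        r7 = SYS_syscall, r0 = SYS_write, r1 = fd, r2 = buf, r3 = len */
    /* via SYS___syscall the number is 64-bit and occupies r0/r1, so r2 = fd, r3 = buf and len comes from the stack */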
diff --git a/bsd-user/arm/target_arch_elf.h b/bsd-user/arm/target_arch_elf.h
new file mode 100644
index 0000000000..4a0215d02e
--- /dev/null
+++ b/bsd-user/arm/target_arch_elf.h
@@ -0,0 +1,128 @@
+/*
+ * arm ELF definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_ELF_H_
+#define _TARGET_ARCH_ELF_H_
+
+#define ELF_START_MMAP 0x80000000
+#define ELF_ET_DYN_LOAD_ADDR 0x500000
+
+#define elf_check_arch(x) ((x) == EM_ARM)
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_ARM
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#define ELF_HWCAP get_elf_hwcap()
+#define ELF_HWCAP2 get_elf_hwcap2()
+
+#define GET_FEATURE(feat, hwcap) \
+ do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+enum {
+ ARM_HWCAP_ARM_SWP = 1 << 0,
+ ARM_HWCAP_ARM_HALF = 1 << 1,
+ ARM_HWCAP_ARM_THUMB = 1 << 2,
+ ARM_HWCAP_ARM_26BIT = 1 << 3,
+ ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
+ ARM_HWCAP_ARM_FPA = 1 << 5,
+ ARM_HWCAP_ARM_VFP = 1 << 6,
+ ARM_HWCAP_ARM_EDSP = 1 << 7,
+ ARM_HWCAP_ARM_JAVA = 1 << 8,
+ ARM_HWCAP_ARM_IWMMXT = 1 << 9,
+ ARM_HWCAP_ARM_CRUNCH = 1 << 10,
+ ARM_HWCAP_ARM_THUMBEE = 1 << 11,
+ ARM_HWCAP_ARM_NEON = 1 << 12,
+ ARM_HWCAP_ARM_VFPv3 = 1 << 13,
+ ARM_HWCAP_ARM_VFPv3D16 = 1 << 14,
+ ARM_HWCAP_ARM_TLS = 1 << 15,
+ ARM_HWCAP_ARM_VFPv4 = 1 << 16,
+ ARM_HWCAP_ARM_IDIVA = 1 << 17,
+ ARM_HWCAP_ARM_IDIVT = 1 << 18,
+ ARM_HWCAP_ARM_VFPD32 = 1 << 19,
+ ARM_HWCAP_ARM_LPAE = 1 << 20,
+ ARM_HWCAP_ARM_EVTSTRM = 1 << 21,
+};
+
+enum {
+ ARM_HWCAP2_ARM_AES = 1 << 0,
+ ARM_HWCAP2_ARM_PMULL = 1 << 1,
+ ARM_HWCAP2_ARM_SHA1 = 1 << 2,
+ ARM_HWCAP2_ARM_SHA2 = 1 << 3,
+ ARM_HWCAP2_ARM_CRC32 = 1 << 4,
+};
+
+static uint32_t get_elf_hwcap(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ hwcaps |= ARM_HWCAP_ARM_SWP;
+ hwcaps |= ARM_HWCAP_ARM_HALF;
+ hwcaps |= ARM_HWCAP_ARM_THUMB;
+ hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
+
+ /* probe for the extra features */
+ /* EDSP is in v5TE and above */
+ GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
+ GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
+ GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
+ GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
+ GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
+ GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
+ GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
+ GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
+ GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);
+
+ if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
+ cpu_isar_feature(aa32_fpdp_v3, cpu)) {
+ hwcaps |= ARM_HWCAP_ARM_VFPv3;
+ if (cpu_isar_feature(aa32_simd_r32, cpu)) {
+ hwcaps |= ARM_HWCAP_ARM_VFPD32;
+ } else {
+ hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
+ }
+ }
+ GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
+
+ return hwcaps;
+}
+
+static uint32_t get_elf_hwcap2(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+ GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+ GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+ GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+ GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
+ return hwcaps;
+}
+
+#undef GET_FEATURE
+#undef GET_FEATURE_ID
+
+#endif /* _TARGET_ARCH_ELF_H_ */
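
For reference, the GET_FEATURE_ID() wrapper used above expands to a plain ISAR feature test; for instance (illustration only, not part of the change):

    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);
    /* expands to: */
    do { if (cpu_isar_feature(aa32_vfp, cpu)) { hwcaps |= ARM_HWCAP_ARM_VFP; } } while (0);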
diff --git a/bsd-user/arm/target_arch_reg.h b/bsd-user/arm/target_arch_reg.h
new file mode 100644
index 0000000000..ef5ed5154f
--- /dev/null
+++ b/bsd-user/arm/target_arch_reg.h
@@ -0,0 +1,60 @@
+/*
+ * FreeBSD arm register structures
+ *
+ * Copyright (c) 2015 Stacey Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_REG_H_
+#define _TARGET_ARCH_REG_H_
+
+/* See sys/arm/include/reg.h */
+typedef struct target_reg {
+ uint32_t r[13];
+ uint32_t r_sp;
+ uint32_t r_lr;
+ uint32_t r_pc;
+ uint32_t r_cpsr;
+} target_reg_t;
+
+typedef struct target_fp_reg {
+ uint32_t fp_exponent;
+ uint32_t fp_mantissa_hi;
+ u_int32_t fp_mantissa_lo;
+} target_fp_reg_t;
+
+typedef struct target_fpreg {
+ uint32_t fpr_fpsr;
+ target_fp_reg_t fpr[8];
+} target_fpreg_t;
+
+#define tswapreg(ptr) tswapal(ptr)
+
+static inline void target_copy_regs(target_reg_t *regs, const CPUARMState *env)
+{
+ int i;
+
+ for (i = 0; i < 13; i++) {
+ regs->r[i] = tswapreg(env->regs[i + 1]);
+ }
+ regs->r_sp = tswapreg(env->regs[13]);
+ regs->r_lr = tswapreg(env->regs[14]);
+ regs->r_pc = tswapreg(env->regs[15]);
+ regs->r_cpsr = tswapreg(cpsr_read((CPUARMState *)env));
+}
+
+#undef tswapreg
+
+#endif /* !_TARGET_ARCH_REG_H_ */
diff --git a/bsd-user/arm/target_arch_signal.h b/bsd-user/arm/target_arch_signal.h
new file mode 100644
index 0000000000..f1844dbf22
--- /dev/null
+++ b/bsd-user/arm/target_arch_signal.h
@@ -0,0 +1,88 @@
+/*
+ * arm signal definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_SIGNAL_H_
+#define _TARGET_ARCH_SIGNAL_H_
+
+#include "cpu.h"
+
+#define TARGET_REG_R0 0
+#define TARGET_REG_R1 1
+#define TARGET_REG_R2 2
+#define TARGET_REG_R3 3
+#define TARGET_REG_R4 4
+#define TARGET_REG_R5 5
+#define TARGET_REG_R6 6
+#define TARGET_REG_R7 7
+#define TARGET_REG_R8 8
+#define TARGET_REG_R9 9
+#define TARGET_REG_R10 10
+#define TARGET_REG_R11 11
+#define TARGET_REG_R12 12
+#define TARGET_REG_R13 13
+#define TARGET_REG_R14 14
+#define TARGET_REG_R15 15
+#define TARGET_REG_CPSR 16
+#define TARGET__NGREG 17
+/* Convenience synonyms */
+#define TARGET_REG_FP TARGET_REG_R11
+#define TARGET_REG_SP TARGET_REG_R13
+#define TARGET_REG_LR TARGET_REG_R14
+#define TARGET_REG_PC TARGET_REG_R15
+
+#define TARGET_INSN_SIZE 4 /* arm instruction size */
+
+/* Size of the signal trampoline code. See _sigtramp(). */
+#define TARGET_SZSIGCODE ((abi_ulong)(9 * TARGET_INSN_SIZE))
+
+/* compare to arm/include/_limits.h */
+#define TARGET_MINSIGSTKSZ (1024 * 4) /* min sig stack size */
+#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768) /* recommended size */
+
+/*
+ * Floating point register state
+ */
+typedef struct target_mcontext_vfp {
+ abi_ullong mcv_reg[32];
+ abi_ulong mcv_fpscr;
+} target_mcontext_vfp_t;
+
+typedef struct target_mcontext {
+ abi_uint __gregs[TARGET__NGREG];
+
+ /*
+ * Originally, rest of this structure was named __fpu, 35 * 4 bytes
+ * long, never accessed from kernel.
+ */
+ abi_ulong mc_vfp_size;
+ abi_ptr mc_vfp_ptr;
+ abi_int mc_spare[33];
+} target_mcontext_t;
+
+#define TARGET_MCONTEXT_SIZE 208
+#define TARGET_UCONTEXT_SIZE 260
+
+#include "target_os_ucontext.h"
+
+struct target_sigframe {
+ target_siginfo_t sf_si; /* saved siginfo */
+ target_ucontext_t sf_uc; /* saved ucontext */
+ target_mcontext_vfp_t sf_vfp; /* actual saved VFP context */
+};
+
+#endif /* !_TARGET_ARCH_SIGNAL_H_ */
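
The two size constants can be cross-checked against the layout above, assuming the 32-bit arm ABI (4-byte abi_uint/abi_ulong/abi_int/abi_ptr) and FreeBSD's 16-byte sigset; the G_STATIC_ASSERTs added in target_os_ucontext.h below enforce the same equalities at build time:

    mcontext: 17*4 (__gregs) + 4 (mc_vfp_size) + 4 (mc_vfp_ptr) + 33*4 (mc_spare) = 208
    ucontext: 16 (uc_sigmask) + 208 (uc_mcontext) + 4 (uc_link) + 12 (uc_stack) + 4 (uc_flags) + 16 (__spare__) = 260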
diff --git a/bsd-user/arm/target_arch_sigtramp.h b/bsd-user/arm/target_arch_sigtramp.h
new file mode 100644
index 0000000000..5d434a9e7e
--- /dev/null
+++ b/bsd-user/arm/target_arch_sigtramp.h
@@ -0,0 +1,49 @@
+/*
+ * arm signal trampoline setup
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_SIGTRAMP_H_
+#define _TARGET_ARCH_SIGTRAMP_H_
+
+/* Compare to arm/arm/locore.S ENTRY_NP(sigcode) */
+static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc,
+ unsigned sys_sigreturn)
+{
+ int i;
+ uint32_t sys_exit = TARGET_FREEBSD_NR_exit;
+ uint32_t sigtramp_code[] = {
+ /* 1 */ 0xE1A0000D, /* mov r0, sp */
+ /* 2 */ 0xE2800000 + sigf_uc, /* add r0, r0, #SIGF_UC */
+ /* 3 */ 0xE59F700C, /* ldr r7, [pc, #12] */
+ /* 4 */ 0xEF000000 + sys_sigreturn, /* swi (SYS_sigreturn) */
+ /* 5 */ 0xE59F7008, /* ldr r7, [pc, #8] */
+ /* 6 */ 0xEF000000 + sys_exit, /* swi (SYS_exit)*/
+ /* 7 */ 0xEAFFFFFA, /* b . -16 */
+ /* 8 */ sys_sigreturn,
+ /* 9 */ sys_exit
+ };
+
+ G_STATIC_ASSERT(sizeof(sigtramp_code) == TARGET_SZSIGCODE);
+
+ for (i = 0; i < 9; i++) {
+ tswap32s(&sigtramp_code[i]);
+ }
+
+ return memcpy_to_target(offset, sigtramp_code, TARGET_SZSIGCODE);
+}
+#endif /* _TARGET_ARCH_SIGTRAMP_H_ */
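
A note on the two pc-relative loads in the trampoline: on ARM the pc value read by an instruction is its own address plus 8, so the immediates land exactly on the literal words appended after the code (worked out for illustration, not part of the change):

    word 3 (ldr r7, [pc, #12]) sits at byte offset 8:  (8 + 8) + 12 = 28, i.e. word 8 (sys_sigreturn)
    word 5 (ldr r7, [pc, #8])  sits at byte offset 16: (16 + 8) + 8 = 32, i.e. word 9 (sys_exit)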
diff --git a/bsd-user/arm/target_arch_sysarch.h b/bsd-user/arm/target_arch_sysarch.h
index 632a5cd453..8cc6bff207 100644
--- a/bsd-user/arm/target_arch_sysarch.h
+++ b/bsd-user/arm/target_arch_sysarch.h
@@ -17,8 +17,8 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef BSD_USER_ARCH_SYSARCH_H_
-#define BSD_USER_ARCH_SYSARCH_H_
+#ifndef _TARGET_ARCH_SYSARCH_H_
+#define _TARGET_ARCH_SYSARCH_H_
#include "target_syscall.h"
#include "target_arch.h"
@@ -75,4 +75,4 @@ static inline void do_freebsd_arch_print_sysarch(
}
}
-#endif /*!BSD_USER_ARCH_SYSARCH_H_ */
+#endif /*!_TARGET_ARCH_SYSARCH_H_ */
diff --git a/bsd-user/arm/target_arch_thread.h b/bsd-user/arm/target_arch_thread.h
new file mode 100644
index 0000000000..11c7f76583
--- /dev/null
+++ b/bsd-user/arm/target_arch_thread.h
@@ -0,0 +1,82 @@
+/*
+ * arm thread support
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_THREAD_H_
+#define _TARGET_ARCH_THREAD_H_
+
+/* Compare to arm/arm/vm_machdep.c cpu_set_upcall_kse() */
+static inline void target_thread_set_upcall(CPUARMState *env, abi_ulong entry,
+ abi_ulong arg, abi_ulong stack_base, abi_ulong stack_size)
+{
+ abi_ulong sp;
+
+ /*
+ * Make sure the stack is properly aligned.
+ * arm/include/param.h (STACKALIGN() macro)
+ */
+ sp = (u_int)(stack_base + stack_size) & ~0x7;
+
+ /* sp = stack base */
+ env->regs[13] = sp;
+ /* pc = start function entry */
+ env->regs[15] = entry & 0xfffffffe;
+ /* r0 = arg */
+ env->regs[0] = arg;
+ env->spsr = ARM_CPU_MODE_USR;
+ /*
+ * Thumb mode is encoded by the low bit in the entry point (since ARM can't
+ * execute at odd addresses). When it's set, set the Thumb bit (T) in the
+ * CPSR.
+ */
+ cpsr_write(env, (entry & 1) * CPSR_T, CPSR_T, CPSRWriteByInstr);
+}
+
+static inline void target_thread_init(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ abi_long stack = infop->start_stack;
+ memset(regs, 0, sizeof(*regs));
+ regs->ARM_cpsr = ARM_CPU_MODE_USR;
+ /*
+ * Thumb mode is encoded by the low bit in the entry point (since ARM can't
+ * execute at odd addresses). When it's set, set the Thumb bit (T) in the
+ * CPSR.
+ */
+ if (infop->entry & 1) {
+ regs->ARM_cpsr |= CPSR_T;
+ }
+ regs->ARM_pc = infop->entry & 0xfffffffe;
+ regs->ARM_sp = stack;
+ if (bsd_type == target_freebsd) {
+ regs->ARM_lr = infop->entry & 0xfffffffe;
+ }
+ /*
+ * FreeBSD kernel passes the ps_strings pointer in r0. This is used by some
+ * programs to set status messages that we see in ps. bsd-user doesn't
+ * support that functionality, so it's ignored. When set to 0, FreeBSD's csu
+ * code ignores it. For the static case, r1 and r2 are effectively ignored
+ * by the csu __startup() routine. For the dynamic case, rtld saves r0 but
+ * generates r1 and r2 and passes them into the csu _startup.
+ *
+ * r0 ps_strings 0 passed since ps arg setting not supported
+ * r1 obj_main ignored by _start(), so 0 passed
+ * r2 cleanup generated by rtld or ignored by _start(), so 0 passed
+ */
+}
+
+#endif /* !_TARGET_ARCH_THREAD_H_ */
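
Both helpers above take the Thumb state from bit 0 of the entry point, as the comments note. A concrete (hypothetical) example: an entry point of 0x00010001 starts the thread at pc = 0x00010000 with the CPSR T bit set, while an entry point of 0x00010000 leaves T clear and starts in ARM state.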
diff --git a/bsd-user/arm/target_arch_vmparam.h b/bsd-user/arm/target_arch_vmparam.h
new file mode 100644
index 0000000000..4bbc04ddf5
--- /dev/null
+++ b/bsd-user/arm/target_arch_vmparam.h
@@ -0,0 +1,48 @@
+/*
+ * arm VM parameters definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_VMPARAM_H_
+#define _TARGET_ARCH_VMPARAM_H_
+
+#include "cpu.h"
+
+/* compare to sys/arm/include/vmparam.h */
+#define TARGET_MAXTSIZ (64 * MiB) /* max text size */
+#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */
+#define TARGET_MAXDSIZ (512 * MiB) /* max data size */
+#define TARGET_DFLSSIZ (4 * MiB) /* initial stack size limit */
+#define TARGET_MAXSSIZ (64 * MiB) /* max stack size */
+#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */
+
+#define TARGET_RESERVED_VA 0xf7000000
+
+ /* KERNBASE - 512 MB */
+#define TARGET_VM_MAXUSER_ADDRESS (0xc0000000 - (512 * MiB))
+#define TARGET_USRSTACK TARGET_VM_MAXUSER_ADDRESS
+
+static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
+{
+ return state->regs[13]; /* sp */
+}
+
+static inline void set_second_rval(CPUARMState *state, abi_ulong retval2)
+{
+ state->regs[1] = retval2;
+}
+
+#endif /* ! _TARGET_ARCH_VMPARAM_H_ */
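
For reference, the user address limit above works out to 0xc0000000 - 0x20000000 = 0xa0000000 (KERNBASE minus 512 MiB), which is also the initial TARGET_USRSTACK and sits below the 0xf7000000 bytes of guest virtual address space reserved via TARGET_RESERVED_VA.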
diff --git a/bsd-user/arm/target_syscall.h b/bsd-user/arm/target_syscall.h
index ef4b37f017..a5f2bb4e01 100644
--- a/bsd-user/arm/target_syscall.h
+++ b/bsd-user/arm/target_syscall.h
@@ -1,5 +1,24 @@
-#ifndef BSD_USER_ARCH_SYSCALL_H_
-#define BSD_USER_ARCH_SYSCALL_H_
+/*
+ * arm cpu system call stubs
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_SYSCALL_H_
+#define _TARGET_ARCH_SYSCALL_H_
struct target_pt_regs {
abi_long uregs[17];
@@ -31,6 +50,6 @@ struct target_pt_regs {
#define TARGET_FREEBSD_ARM_GET_TP 3
#define TARGET_HW_MACHINE "arm"
-#define TARGET_HW_MACHINE_ARCH "armv6"
+#define TARGET_HW_MACHINE_ARCH "armv7"
-#endif /* !BSD_USER_ARCH_SYSCALL_H_ */
+#endif /* !_TARGET_ARCH_SYSCALL_H_ */
diff --git a/bsd-user/freebsd/target_os_signal.h b/bsd-user/freebsd/target_os_signal.h
index 1a4c5faf19..3ed454e086 100644
--- a/bsd-user/freebsd/target_os_signal.h
+++ b/bsd-user/freebsd/target_os_signal.h
@@ -1,9 +1,6 @@
#ifndef _TARGET_OS_SIGNAL_H_
#define _TARGET_OS_SIGNAL_H_
-/* FreeBSD's sys/ucontext.h defines this */
-#define TARGET_MC_GET_CLEAR_RET 0x0001
-
#include "target_os_siginfo.h"
#include "target_arch_signal.h"
diff --git a/bsd-user/freebsd/target_os_ucontext.h b/bsd-user/freebsd/target_os_ucontext.h
new file mode 100644
index 0000000000..41b28b2c15
--- /dev/null
+++ b/bsd-user/freebsd/target_os_ucontext.h
@@ -0,0 +1,44 @@
+/*
+ * FreeBSD has a common ucontext definition for all architectures.
+ *
+ * Copyright 2021 Warner Losh <imp@bsdimp.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later OR BSD-3-Clause
+ */
+#ifndef TARGET_OS_UCONTEXT_H
+#define TARGET_OS_UCONTEXT_H
+
+/*
+ * Defines the common bits for all of FreeBSD's architectures. Has to be
+ * included AFTER the MD target_mcontext_t is defined, however, so can't
+ * be in the grab-bag that is target_os_signal.h.
+ */
+
+/* See FreeBSD's sys/ucontext.h */
+#define TARGET_MC_GET_CLEAR_RET 0x0001
+
+/* FreeBSD's sys/_ucontext.h structures */
+typedef struct target_ucontext {
+ target_sigset_t uc_sigmask;
+ target_mcontext_t uc_mcontext;
+ abi_ulong uc_link;
+ target_stack_t uc_stack;
+ int32_t uc_flags;
+ int32_t __spare__[4];
+} target_ucontext_t;
+
+G_STATIC_ASSERT(TARGET_MCONTEXT_SIZE == sizeof(target_mcontext_t));
+G_STATIC_ASSERT(TARGET_UCONTEXT_SIZE == sizeof(target_ucontext_t));
+
+struct target_sigframe;
+
+abi_long set_sigtramp_args(CPUArchState *env, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka);
+abi_long get_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags);
+abi_long set_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int srflag);
+abi_long get_ucontext_sigreturn(CPUArchState *regs, abi_ulong target_sf,
+ abi_ulong *target_uc);
+
+#endif /* TARGET_OS_UCONTEXT_H */
diff --git a/bsd-user/i386/signal.c b/bsd-user/i386/signal.c
new file mode 100644
index 0000000000..2939d32400
--- /dev/null
+++ b/bsd-user/i386/signal.c
@@ -0,0 +1,55 @@
+/*
+ * i386 dependent signal definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu.h"
+
+/*
+ * Compare to i386/i386/machdep.c sendsig()
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUX86State *env, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /* XXX return -TARGET_EOPNOTSUPP; */
+ return 0;
+}
+
+/* Compare to i386/i386/machdep.c get_mcontext() */
+abi_long get_mcontext(CPUX86State *regs, target_mcontext_t *mcp, int flags)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+/* Compare to i386/i386/machdep.c set_mcontext() */
+abi_long set_mcontext(CPUX86State *regs, target_mcontext_t *mcp, int srflag)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+abi_long get_ucontext_sigreturn(CPUX86State *regs, abi_ulong target_sf,
+ abi_ulong *target_uc)
+{
+ /* XXX */
+ *target_uc = 0;
+ return -TARGET_EOPNOTSUPP;
+}
diff --git a/bsd-user/i386/target_arch_signal.h b/bsd-user/i386/target_arch_signal.h
index a90750d602..279dadc22c 100644
--- a/bsd-user/i386/target_arch_signal.h
+++ b/bsd-user/i386/target_arch_signal.h
@@ -27,21 +27,56 @@
#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */
#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */
-struct target_sigcontext {
- /* to be added */
-};
-
typedef struct target_mcontext {
+ abi_ulong mc_onstack; /* XXX - sigcontext compat. */
+ abi_ulong mc_gs; /* machine state (struct trapframe) */
+ abi_ulong mc_fs;
+ abi_ulong mc_es;
+ abi_ulong mc_ds;
+ abi_ulong mc_edi;
+ abi_ulong mc_esi;
+ abi_ulong mc_ebp;
+ abi_ulong mc_isp;
+ abi_ulong mc_ebx;
+ abi_ulong mc_edx;
+ abi_ulong mc_ecx;
+ abi_ulong mc_eax;
+ abi_ulong mc_trapno;
+ abi_ulong mc_err;
+ abi_ulong mc_eip;
+ abi_ulong mc_cs;
+ abi_ulong mc_eflags;
+ abi_ulong mc_esp;
+ abi_ulong mc_ss;
+
+ int32_t mc_len; /* sizeof(mcontext_t) */
+#define _MC_FPFMT_NODEV 0x10000 /* device not present or configured */
+#define _MC_FPFMT_387 0x10001
+#define _MC_FPFMT_XMM 0x10002
+ int32_t mc_fpformat;
+#define _MC_FPOWNED_NONE 0x20000 /* FP state not used */
+#define _MC_FPOWNED_FPU 0x20001 /* FP state came from FPU */
+#define _MC_FPOWNED_PCB 0x20002 /* FP state came from PCB */
+ int32_t mc_ownedfp;
+ abi_ulong mc_flags;
+ /*
+ * See <machine/npx.h> for the internals of mc_fpstate[].
+ */
+ int32_t mc_fpstate[128] __aligned(16);
+
+ abi_ulong mc_fsbase;
+ abi_ulong mc_gsbase;
+
+ abi_ulong mc_xfpustate;
+ abi_ulong mc_xfpustate_len;
+
+ int32_t mc_spare2[4];
} target_mcontext_t;
-typedef struct target_ucontext {
- target_sigset_t uc_sigmask;
- target_mcontext_t uc_mcontext;
- abi_ulong uc_link;
- target_stack_t uc_stack;
- int32_t uc_flags;
- int32_t __spare__[4];
-} target_ucontext_t;
+#define TARGET_MCONTEXT_SIZE 640
+#define TARGET_UCONTEXT_SIZE 704
+
+#include "target_os_ucontext.h"
struct target_sigframe {
abi_ulong sf_signum;
@@ -53,40 +88,4 @@ struct target_sigframe {
uint32_t __spare__[2];
};
-/*
- * Compare to i386/i386/machdep.c sendsig()
- * Assumes that target stack frame memory is locked.
- */
-static inline abi_long set_sigtramp_args(CPUX86State *regs,
- int sig, struct target_sigframe *frame, abi_ulong frame_addr,
- struct target_sigaction *ka)
-{
- /* XXX return -TARGET_EOPNOTSUPP; */
- return 0;
-}
-
-/* Compare to i386/i386/machdep.c get_mcontext() */
-static inline abi_long get_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int flags)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-/* Compare to i386/i386/machdep.c set_mcontext() */
-static inline abi_long set_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int srflag)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-static inline abi_long get_ucontext_sigreturn(CPUX86State *regs,
- abi_ulong target_sf, abi_ulong *target_uc)
-{
- /* XXX */
- *target_uc = 0;
- return -TARGET_EOPNOTSUPP;
-}
-
#endif /* TARGET_ARCH_SIGNAL_H */
diff --git a/bsd-user/mips/target_arch_sysarch.h b/bsd-user/mips/target_arch_sysarch.h
deleted file mode 100644
index 6da803a408..0000000000
--- a/bsd-user/mips/target_arch_sysarch.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * mips sysarch() system call emulation
- *
- * Copyright (c) 2013 Stacey D. Son
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef BSD_USER_ARCH_SYSARCH_H_
-#define BSD_USER_ARCH_SYSARCH_H_
-
-#include "target_syscall.h"
-#include "target_arch.h"
-
-static inline abi_long do_freebsd_arch_sysarch(CPUMIPSState *env, int op,
- abi_ulong parms)
-{
- int ret = 0;
-
- switch (op) {
- case TARGET_MIPS_SET_TLS:
- target_cpu_set_tls(env, parms);
- break;
-
- case TARGET_MIPS_GET_TLS:
- if (put_user(target_cpu_get_tls(env), parms, abi_ulong)) {
- ret = -TARGET_EFAULT;
- }
- break;
-
- default:
- ret = -TARGET_EINVAL;
- break;
- }
-
- return ret;
-}
-
-static inline void do_freebsd_arch_print_sysarch(
- const struct syscallname *name, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6)
-{
-
- switch (arg1) {
- case TARGET_MIPS_SET_TLS:
- gemu_log("%s(SET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- case TARGET_MIPS_GET_TLS:
- gemu_log("%s(GET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- default:
- gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2);
- }
-}
-
-#endif /*!BSD_USER_ARCH_SYSARCH_H_ */
diff --git a/bsd-user/mips/target_syscall.h b/bsd-user/mips/target_syscall.h
deleted file mode 100644
index aacc6ddf9f..0000000000
--- a/bsd-user/mips/target_syscall.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mips system call definitions
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _MIPS_SYSCALL_H_
-#define _MIPS_SYSCALL_H_
-
-/*
- * struct target_pt_regs defines the way the registers are stored on the stack
- * during a system call.
- */
-
-struct target_pt_regs {
- /* Saved main processor registers. */
- abi_ulong regs[32];
-
- /* Saved special registers. */
- abi_ulong cp0_status;
- abi_ulong lo;
- abi_ulong hi;
- abi_ulong cp0_badvaddr;
- abi_ulong cp0_cause;
- abi_ulong cp0_epc;
-};
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-#define UNAME_MACHINE "mips"
-#else
-#define UNAME_MACHINE "mipsel"
-#endif
-
-#define TARGET_HW_MACHINE "mips"
-#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE
-
-/* sysarch() commands */
-#define TARGET_MIPS_SET_TLS 1
-#define TARGET_MIPS_GET_TLS 2
-
-#endif /* !_MIPS_SYSCALL_H_ */
diff --git a/bsd-user/mips64/target_arch_sysarch.h b/bsd-user/mips64/target_arch_sysarch.h
deleted file mode 100644
index e6f9c00d5f..0000000000
--- a/bsd-user/mips64/target_arch_sysarch.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * mips64 sysarch() system call emulation
- *
- * Copyright (c) 2013 Stacey D. Son
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef BSD_USER_ARCH_SYSARCH_H_
-#define BSD_USER_ARCH_SYSARCH_H_
-
-#include "target_syscall.h"
-#include "target_arch.h"
-
-static inline abi_long do_freebsd_arch_sysarch(CPUMIPSState *env, int op,
- abi_ulong parms)
-{
- int ret = 0;
-
- switch (op) {
- case TARGET_MIPS_SET_TLS:
- target_cpu_set_tls(env, parms);
- break;
-
- case TARGET_MIPS_GET_TLS:
- if (put_user(target_cpu_get_tls(env), parms, abi_ulong)) {
- ret = -TARGET_EFAULT;
- }
- break;
-
- default:
- ret = -TARGET_EINVAL;
- break;
- }
-
- return ret;
-}
-
-static inline void do_freebsd_arch_print_sysarch(
- const struct syscallname *name, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6)
-{
-
- switch (arg1) {
- case TARGET_MIPS_SET_TLS:
- gemu_log("%s(SET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- case TARGET_MIPS_GET_TLS:
- gemu_log("%s(GET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- default:
- gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2);
- }
-}
-
-#endif /*!BSD_USER_ARCH_SYSARCH_H_ */
diff --git a/bsd-user/mips64/target_syscall.h b/bsd-user/mips64/target_syscall.h
deleted file mode 100644
index bf4c598b13..0000000000
--- a/bsd-user/mips64/target_syscall.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * mips64 system call definitions
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _MIPS64_SYSCALL_H_
-#define _MIPS64_SYSCALL_H_
-
-/*
- * struct target_pt_regs defines the way the registers are stored on the stack
- * during a system call.
- */
-
-struct target_pt_regs {
- /* Saved main processor registers. */
- abi_ulong regs[32];
-
- /* Saved special registers. */
- abi_ulong cp0_status;
- abi_ulong lo;
- abi_ulong hi;
- abi_ulong cp0_badvaddr;
- abi_ulong cp0_cause;
- abi_ulong cp0_epc;
-};
-
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-#define UNAME_MACHINE "mips64"
-#else
-#define UNAME_MACHINE "mips64el"
-#endif
-
-#define TARGET_HW_MACHINE "mips"
-#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE
-
-/* sysarch() commands */
-#define TARGET_MIPS_SET_TLS 1
-#define TARGET_MIPS_GET_TLS 2
-
-#endif /* !_MIPS64_SYSCALL_H_ */
diff --git a/bsd-user/x86_64/signal.c b/bsd-user/x86_64/signal.c
new file mode 100644
index 0000000000..8885152a7d
--- /dev/null
+++ b/bsd-user/x86_64/signal.c
@@ -0,0 +1,55 @@
+/*
+ * x86_64 signal definitions
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu.h"
+
+/*
+ * Compare to amd64/amd64/machdep.c sendsig()
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUX86State *regs,
+ int sig, struct target_sigframe *frame, abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /* XXX return -TARGET_EOPNOTSUPP; */
+ return 0;
+}
+
+/* Compare to amd64/amd64/machdep.c get_mcontext() */
+abi_long get_mcontext(CPUX86State *regs,
+ target_mcontext_t *mcp, int flags)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+/* Compare to amd64/amd64/machdep.c set_mcontext() */
+abi_long set_mcontext(CPUX86State *regs,
+ target_mcontext_t *mcp, int srflag)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+abi_long get_ucontext_sigreturn(CPUX86State *regs,
+ abi_ulong target_sf, abi_ulong *target_uc)
+{
+ /* XXX */
+ *target_uc = 0;
+ return -TARGET_EOPNOTSUPP;
+}
diff --git a/bsd-user/x86_64/target_arch_signal.h b/bsd-user/x86_64/target_arch_signal.h
index 4bb753b08b..b4a0ebf2bd 100644
--- a/bsd-user/x86_64/target_arch_signal.h
+++ b/bsd-user/x86_64/target_arch_signal.h
@@ -27,21 +27,64 @@
#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */
#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */
-struct target_sigcontext {
- /* to be added */
-};
-
typedef struct target_mcontext {
+ abi_ulong mc_onstack; /* XXX - sigcontext compat. */
+ abi_ulong mc_rdi; /* machine state (struct trapframe) */
+ abi_ulong mc_rsi;
+ abi_ulong mc_rdx;
+ abi_ulong mc_rcx;
+ abi_ulong mc_r8;
+ abi_ulong mc_r9;
+ abi_ulong mc_rax;
+ abi_ulong mc_rbx;
+ abi_ulong mc_rbp;
+ abi_ulong mc_r10;
+ abi_ulong mc_r11;
+ abi_ulong mc_r12;
+ abi_ulong mc_r13;
+ abi_ulong mc_r14;
+ abi_ulong mc_r15;
+ uint32_t mc_trapno;
+ uint16_t mc_fs;
+ uint16_t mc_gs;
+ abi_ulong mc_addr;
+ uint32_t mc_flags;
+ uint16_t mc_es;
+ uint16_t mc_ds;
+ abi_ulong mc_err;
+ abi_ulong mc_rip;
+ abi_ulong mc_cs;
+ abi_ulong mc_rflags;
+ abi_ulong mc_rsp;
+ abi_ulong mc_ss;
+
+ abi_long mc_len; /* sizeof(mcontext_t) */
+
+#define _MC_FPFMT_NODEV 0x10000 /* device not present or configured */
+#define _MC_FPFMT_XMM 0x10002
+ abi_long mc_fpformat;
+#define _MC_FPOWNED_NONE 0x20000 /* FP state not used */
+#define _MC_FPOWNED_FPU 0x20001 /* FP state came from FPU */
+#define _MC_FPOWNED_PCB 0x20002 /* FP state came from PCB */
+ abi_long mc_ownedfp;
+ /*
+ * See <machine/fpu.h> for the internals of mc_fpstate[].
+ */
+ abi_long mc_fpstate[64] __aligned(16);
+
+ abi_ulong mc_fsbase;
+ abi_ulong mc_gsbase;
+
+ abi_ulong mc_xfpustate;
+ abi_ulong mc_xfpustate_len;
+
+ abi_long mc_spare[4];
} target_mcontext_t;
-typedef struct target_ucontext {
- target_sigset_t uc_sigmask;
- target_mcontext_t uc_mcontext;
- abi_ulong uc_link;
- target_stack_t uc_stack;
- int32_t uc_flags;
- int32_t __spare__[4];
-} target_ucontext_t;
+#define TARGET_MCONTEXT_SIZE 800
+#define TARGET_UCONTEXT_SIZE 880
+
+#include "target_os_ucontext.h"
struct target_sigframe {
abi_ulong sf_signum;
@@ -53,40 +96,4 @@ struct target_sigframe {
uint32_t __spare__[2];
};
-/*
- * Compare to amd64/amd64/machdep.c sendsig()
- * Assumes that target stack frame memory is locked.
- */
-static inline abi_long set_sigtramp_args(CPUX86State *regs,
- int sig, struct target_sigframe *frame, abi_ulong frame_addr,
- struct target_sigaction *ka)
-{
- /* XXX return -TARGET_EOPNOTSUPP; */
- return 0;
-}
-
-/* Compare to amd64/amd64/machdep.c get_mcontext() */
-static inline abi_long get_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int flags)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-/* Compare to amd64/amd64/machdep.c set_mcontext() */
-static inline abi_long set_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int srflag)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-static inline abi_long get_ucontext_sigreturn(CPUX86State *regs,
- abi_ulong target_sf, abi_ulong *target_uc)
-{
- /* XXX */
- *target_uc = 0;
- return -TARGET_EOPNOTSUPP;
-}
-
#endif /* !TARGET_ARCH_SIGNAL_H_ */
diff --git a/chardev/char-socket.c b/chardev/char-socket.c
index d619088232..fab2d791d4 100644
--- a/chardev/char-socket.c
+++ b/chardev/char-socket.c
@@ -290,13 +290,6 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len)
NULL);
}
- if (ret == QIO_CHANNEL_ERR_BLOCK) {
- errno = EAGAIN;
- ret = -1;
- } else if (ret == -1) {
- errno = EIO;
- }
-
if (msgfds_num) {
/* close and clean read_msgfds */
for (i = 0; i < s->read_msgfds_num; i++) {
@@ -325,6 +318,13 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len)
#endif
}
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ errno = EAGAIN;
+ ret = -1;
+ } else if (ret == -1) {
+ errno = EIO;
+ }
+
return ret;
}
@@ -525,6 +525,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
int size;
+ int saved_errno;
if (s->state != TCP_CHARDEV_STATE_CONNECTED) {
return 0;
@@ -532,6 +533,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len)
qio_channel_set_blocking(s->ioc, true, NULL);
size = tcp_chr_recv(chr, (void *) buf, len);
+ saved_errno = errno;
if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) {
qio_channel_set_blocking(s->ioc, false, NULL);
}
@@ -540,6 +542,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len)
tcp_chr_disconnect(chr);
}
+ errno = saved_errno;
return size;
}
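Both char-socket hunks are about errno hygiene: the EAGAIN/EIO mapping now happens only after the FD-passing bookkeeping, and tcp_chr_sync_read() saves errno across qio_channel_set_blocking() and tcp_chr_disconnect(), which may clobber it before the caller looks at it. A minimal, chardev-independent sketch of the save/restore pattern::

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /*
     * Read from fd, then run cleanup that performs further syscalls (here:
     * clearing O_NONBLOCK), while still reporting read()'s original errno.
     */
    static ssize_t read_then_cleanup(int fd, void *buf, size_t len)
    {
        ssize_t ret = read(fd, buf, len);
        int saved_errno = errno;              /* capture before any other syscall */

        int flags = fcntl(fd, F_GETFL);       /* cleanup may overwrite errno */
        if (flags >= 0) {
            fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
        }

        errno = saved_errno;                  /* restore what read() reported */
        return ret;
    }

The hunk applies the same idea, with qio_channel_set_blocking() and the disconnect path standing in for the fcntl() calls above.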
diff --git a/common-user/host/i386/safe-syscall.inc.S b/common-user/host/i386/safe-syscall.inc.S
index baf5400a29..db2ed09839 100644
--- a/common-user/host/i386/safe-syscall.inc.S
+++ b/common-user/host/i386/safe-syscall.inc.S
@@ -120,6 +120,7 @@ safe_syscall_end:
pop %ebp
.cfi_adjust_cfa_offset -4
.cfi_restore ebp
+ mov %eax, 4(%esp)
jmp safe_syscall_set_errno_tail
.cfi_endproc
diff --git a/common-user/host/mips/safe-syscall.inc.S b/common-user/host/mips/safe-syscall.inc.S
index fc75a337d1..6a44614970 100644
--- a/common-user/host/mips/safe-syscall.inc.S
+++ b/common-user/host/mips/safe-syscall.inc.S
@@ -141,6 +141,7 @@ safe_syscall_end:
1: USE_ALT_CP(t0)
SETUP_GPX(t1)
SETUP_GPX64(t0, t1)
+ move a0, v0
PTR_LA t9, safe_syscall_set_errno_tail
jr t9
diff --git a/common-user/host/x86_64/safe-syscall.inc.S b/common-user/host/x86_64/safe-syscall.inc.S
index a20927a783..d1a67a303a 100644
--- a/common-user/host/x86_64/safe-syscall.inc.S
+++ b/common-user/host/x86_64/safe-syscall.inc.S
@@ -99,6 +99,7 @@ safe_syscall_end:
1: pop %rbp
.cfi_def_cfa_offset 8
.cfi_restore rbp
+ mov %eax, %edi
jmp safe_syscall_set_errno_tail
.cfi_endproc
diff --git a/configs/targets/arm-bsd-user.mak b/configs/targets/arm-bsd-user.mak
new file mode 100644
index 0000000000..cb143e6426
--- /dev/null
+++ b/configs/targets/arm-bsd-user.mak
@@ -0,0 +1,2 @@
+TARGET_ARCH=arm
+TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml
diff --git a/cpu.c b/cpu.c
index 945dd3dded..016bf06a1a 100644
--- a/cpu.c
+++ b/cpu.c
@@ -174,13 +174,23 @@ void cpu_exec_unrealizefn(CPUState *cpu)
cpu_list_remove(cpu);
}
+/*
+ * This can't go in hw/core/cpu.c because that file is compiled only
+ * once for both user-mode and system builds.
+ */
static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Create a property for the user-only object, so users can
+ * adjust prctl(PR_SET_UNALIGN) from the command-line.
+ * Has no effect if the target does not support the feature.
+ */
+ DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
+ prctl_unalign_sigbus, false),
+#else
/*
- * Create a memory property for softmmu CPU object,
- * so users can wire up its memory. (This can't go in hw/core/cpu.c
- * because that file is compiled only once for both user-mode
- * and system builds.) The default if no link is set up is to use
+ * Create a memory property for softmmu CPU object, so users can
+ * wire up its memory. The default if no link is set up is to use
* the system address space.
*/
DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
diff --git a/disas/riscv.c b/disas/riscv.c
index 793ad14c27..03c8dc9961 100644
--- a/disas/riscv.c
+++ b/disas/riscv.c
@@ -3090,3 +3090,8 @@ int print_insn_riscv64(bfd_vma memaddr, struct disassemble_info *info)
{
return print_insn_riscv(memaddr, info, rv64);
}
+
+int print_insn_riscv128(bfd_vma memaddr, struct disassemble_info *info)
+{
+ return print_insn_riscv(memaddr, info, rv128);
+}
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index 5693abb663..e21e07478f 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -134,12 +134,6 @@ specified.
Use ``-display sdl,window-close=...`` instead (i.e. with a minus instead of
an underscore between "window" and "close").
-``-no-quit`` (since 6.1)
-''''''''''''''''''''''''
-
-The ``-no-quit`` is a synonym for ``-display ...,window-close=off`` which
-should be used instead.
-
``-alt-grab`` and ``-display sdl,alt_grab=on`` (since 6.2)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst
index d42c3341de..4c4da20d0f 100644
--- a/docs/about/removed-features.rst
+++ b/docs/about/removed-features.rst
@@ -330,6 +330,13 @@ RISC-V firmware not booted by default (removed in 5.1)
QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default``
for the RISC-V ``virt`` machine and ``sifive_u`` machine.
+``-no-quit`` (removed in 7.0)
+'''''''''''''''''''''''''''''
+
+The ``-no-quit`` option was a synonym for ``-display ...,window-close=off``,
+which should be used instead.
+
+
QEMU Machine Protocol (QMP) commands
------------------------------------
diff --git a/docs/interop/index.rst b/docs/interop/index.rst
index c59bac9834..b7632acb7b 100644
--- a/docs/interop/index.rst
+++ b/docs/interop/index.rst
@@ -22,3 +22,4 @@ are useful for making QEMU interoperate with other software.
vhost-user
vhost-user-gpu
vhost-vdpa
+ virtio-balloon-stats
diff --git a/docs/virtio-balloon-stats.txt b/docs/interop/virtio-balloon-stats.rst
index 1732cc8c8a..b9a6a6edb2 100644
--- a/docs/virtio-balloon-stats.txt
+++ b/docs/interop/virtio-balloon-stats.rst
@@ -1,4 +1,4 @@
-virtio balloon memory statistics
+Virtio balloon memory statistics
================================
The virtio balloon driver supports guest memory statistics reporting. These
@@ -9,10 +9,12 @@ Before querying the available stats, clients first have to enable polling.
This is done by writing a time interval value (in seconds) to the
guest-stats-polling-interval property. This value can be:
- > 0 enables polling in the specified interval. If polling is already
+ > 0
+ enables polling in the specified interval. If polling is already
enabled, the polling time interval is changed to the new value
- 0 disables polling. Previous polled statistics are still valid and
+ 0
+ disables polling. Previous polled statistics are still valid and
can be queried.
Once polling is enabled, the virtio-balloon device in QEMU will start
@@ -22,7 +24,7 @@ interval.
To retrieve those stats, clients have to query the guest-stats property,
which will return a dictionary containing:
- o A key named 'stats', containing all available stats. If the guest
+ * A key named 'stats', containing all available stats. If the guest
doesn't support a particular stat, or if it couldn't be retrieved,
its value will be -1. Currently, the following stats are supported:
@@ -37,7 +39,7 @@ which will return a dictionary containing:
- stat-htlb-pgalloc
- stat-htlb-pgfail
- o A key named last-update, which contains the last stats update
+ * A key named last-update, which contains the last stats update
timestamp in seconds. Since this timestamp is generated by the host,
a buggy guest can't influence its value. The value is 0 if the guest
has not updated the stats (yet).
@@ -61,32 +63,32 @@ It's also important to note the following:
respond to the request the timer will never be re-armed, which has
the same effect as disabling polling
-Here are a few examples. QEMU is started with '-device virtio-balloon',
-which generates '/machine/peripheral-anon/device[1]' as the QOM path for
+Here are a few examples. QEMU is started with ``-device virtio-balloon``,
+which generates ``/machine/peripheral-anon/device[1]`` as the QOM path for
the balloon device.
-Enable polling with 2 seconds interval:
+Enable polling with 2 seconds interval::
-{ "execute": "qom-set",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "guest-stats-polling-interval", "value": 2 } }
+ { "execute": "qom-set",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "guest-stats-polling-interval", "value": 2 } }
-{ "return": {} }
+ { "return": {} }
-Change polling to 10 seconds:
+Change polling to 10 seconds::
-{ "execute": "qom-set",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "guest-stats-polling-interval", "value": 10 } }
+ { "execute": "qom-set",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "guest-stats-polling-interval", "value": 10 } }
-{ "return": {} }
+ { "return": {} }
-Get stats:
+Get stats::
-{ "execute": "qom-get",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "guest-stats" } }
-{
+ { "execute": "qom-get",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "guest-stats" } }
+ {
"return": {
"stats": {
"stat-swap-out": 0,
@@ -98,12 +100,12 @@ Get stats:
},
"last-update": 1358529861
}
-}
+ }
-Disable polling:
+Disable polling::
-{ "execute": "qom-set",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "stats-polling-interval", "value": 0 } }
+ { "execute": "qom-set",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "stats-polling-interval", "value": 0 } }
-{ "return": {} }
+ { "return": {} }
diff --git a/docs/sphinx/fakedbusdoc.py b/docs/sphinx/fakedbusdoc.py
index a680b25754..d2c5079046 100644
--- a/docs/sphinx/fakedbusdoc.py
+++ b/docs/sphinx/fakedbusdoc.py
@@ -7,12 +7,12 @@
# Author: Marc-André Lureau <marcandre.lureau@redhat.com>
"""dbus-doc is a Sphinx extension that provides documentation from D-Bus XML."""
+from docutils.parsers.rst import Directive
from sphinx.application import Sphinx
-from sphinx.util.docutils import SphinxDirective
from typing import Any, Dict
-class FakeDBusDocDirective(SphinxDirective):
+class FakeDBusDocDirective(Directive):
has_content = True
required_arguments = 1
diff --git a/docs/tools/qemu-trace-stap.rst b/docs/tools/qemu-trace-stap.rst
index d53073b52b..2169ce5d17 100644
--- a/docs/tools/qemu-trace-stap.rst
+++ b/docs/tools/qemu-trace-stap.rst
@@ -46,19 +46,19 @@ The following commands are valid:
any of the listed names. If no *PATTERN* is given, all possible
probes will be listed.
- For example, to list all probes available in the ``qemu-system-x86_64``
+ For example, to list all probes available in the |qemu_system|
binary:
- ::
+ .. parsed-literal::
- $ qemu-trace-stap list qemu-system-x86_64
+ $ qemu-trace-stap list |qemu_system|
To filter the list to only cover probes related to QEMU's cryptographic
subsystem, in a binary outside ``$PATH``
- ::
+ .. parsed-literal::
- $ qemu-trace-stap list /opt/qemu/4.0.0/bin/qemu-system-x86_64 'qcrypto*'
+ $ qemu-trace-stap list /opt/qemu/|version|/bin/|qemu_system| 'qcrypto*'
.. option:: run OPTIONS BINARY PATTERN...
@@ -90,26 +90,26 @@ The following commands are valid:
Restrict the tracing session so that it only triggers for the process
identified by *PID*.
- For example, to monitor all processes executing ``qemu-system-x86_64``
+ For example, to monitor all processes executing |qemu_system|
as found on ``$PATH``, displaying all I/O related probes:
- ::
+ .. parsed-literal::
- $ qemu-trace-stap run qemu-system-x86_64 'qio*'
+ $ qemu-trace-stap run |qemu_system| 'qio*'
To monitor only the QEMU process with PID 1732
- ::
+ .. parsed-literal::
- $ qemu-trace-stap run --pid=1732 qemu-system-x86_64 'qio*'
+ $ qemu-trace-stap run --pid=1732 |qemu_system| 'qio*'
To monitor QEMU processes running an alternative binary outside of
``$PATH``, displaying verbose information about setup of the
tracing environment:
- ::
+ .. parsed-literal::
- $ qemu-trace-stap -v run /opt/qemu/4.0.0/qemu-system-x86_64 'qio*'
+ $ qemu-trace-stap -v run /opt/qemu/|version|/bin/|qemu_system| 'qio*'
See also
--------
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index 1e004d0078..3e811bf03c 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -345,8 +345,8 @@ int acpi_get_slic_oem(AcpiSlicOem *oem)
struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length));
if (memcmp(hdr->sig, "SLIC", 4) == 0) {
- oem->id = hdr->oem_id;
- oem->table_id = hdr->oem_table_id;
+ oem->id = g_strndup(hdr->oem_id, 6);
+ oem->table_id = g_strndup(hdr->oem_table_id, 8);
return 0;
}
}
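The acpi core hunk stops handing out pointers into the SLIC table header and instead duplicates the fixed-width OEM ID fields (6 and 8 bytes, not NUL-terminated), which is why the later acpi-build hunk gains the matching g_free() calls. A standalone sketch of the same idea using plain strndup(); the header struct here is illustrative, not the real ACPI definition::

    #define _POSIX_C_SOURCE 200809L   /* for strndup() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative header: fixed-width fields with no terminating NUL. */
    struct demo_table_header {
        char sig[4];
        char oem_id[6];
        char oem_table_id[8];
    };

    int main(void)
    {
        struct demo_table_header hdr = {
            .sig          = { 'S', 'L', 'I', 'C' },
            .oem_id       = { 'V', 'E', 'N', 'D', 'O', 'R' },
            .oem_table_id = { 'P', 'R', 'O', 'D', 'U', 'C', 'T', '1' },
        };

        /*
         * strndup() bounds the read to the field width and returns a
         * NUL-terminated copy the caller owns (and must free), instead of
         * a pointer into table memory that may later be reused or freed.
         */
        char *oem_id = strndup(hdr.oem_id, sizeof(hdr.oem_id));
        char *oem_table_id = strndup(hdr.oem_table_id, sizeof(hdr.oem_table_id));

        printf("OEM ID: %s, table ID: %s\n", oem_id, oem_table_id);
        free(oem_id);
        free(oem_table_id);
        return 0;
    }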
diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c
index 30405b5113..6befd23e16 100644
--- a/hw/acpi/pcihp.c
+++ b/hw/acpi/pcihp.c
@@ -128,20 +128,15 @@ static void acpi_set_pci_info(void)
static void acpi_pcihp_disable_root_bus(void)
{
- static bool root_hp_disabled;
Object *host = acpi_get_i386_pci_host();
PCIBus *bus;
- if (root_hp_disabled) {
- return;
- }
-
bus = PCI_HOST_BRIDGE(host)->bus;
- if (bus) {
+ if (bus && qbus_is_hotpluggable(BUS(bus))) {
/* setting the hotplug handler to NULL makes the bus non-hotpluggable */
qbus_set_hotplug_handler(BUS(bus), NULL);
}
- root_hp_disabled = true;
+
return;
}
@@ -491,6 +486,9 @@ static void pci_write(void *opaque, hwaddr addr, uint64_t data,
}
bus = acpi_pcihp_find_hotplug_bus(s, s->hotplug_select);
+ if (!bus) {
+ break;
+ }
QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) {
Object *o = OBJECT(kid->child);
PCIDevice *dev = PCI_DEVICE(o);
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index 0384357a95..e33483fb5d 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -19,9 +19,11 @@
#include "sysemu/sysemu.h"
#define ASPEED_SOC_IOMEM_SIZE 0x00200000
+#define ASPEED_SOC_DPMCU_SIZE 0x00040000
static const hwaddr aspeed_soc_ast2600_memmap[] = {
[ASPEED_DEV_SRAM] = 0x10000000,
+ [ASPEED_DEV_DPMCU] = 0x18000000,
/* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */
[ASPEED_DEV_IOMEM] = 0x1E600000,
[ASPEED_DEV_PWM] = 0x1E610000,
@@ -44,6 +46,7 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = {
[ASPEED_DEV_SCU] = 0x1E6E2000,
[ASPEED_DEV_XDMA] = 0x1E6E7000,
[ASPEED_DEV_ADC] = 0x1E6E9000,
+ [ASPEED_DEV_DP] = 0x1E6EB000,
[ASPEED_DEV_VIDEO] = 0x1E700000,
[ASPEED_DEV_SDHCI] = 0x1E740000,
[ASPEED_DEV_EMMC] = 0x1E750000,
@@ -104,6 +107,7 @@ static const int aspeed_soc_ast2600_irqmap[] = {
[ASPEED_DEV_ETH3] = 32,
[ASPEED_DEV_ETH4] = 33,
[ASPEED_DEV_KCS] = 138, /* 138 -> 142 */
+ [ASPEED_DEV_DP] = 62,
};
static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl)
@@ -298,6 +302,10 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(get_system_memory(),
sc->memmap[ASPEED_DEV_SRAM], &s->sram);
+ /* DPMCU */
+ create_unimplemented_device("aspeed.dpmcu", sc->memmap[ASPEED_DEV_DPMCU],
+ ASPEED_SOC_DPMCU_SIZE);
+
/* SCU */
if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) {
return;
diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c
index 0866d2f4f0..7d0f3148be 100644
--- a/hw/arm/npcm7xx_boards.c
+++ b/hw/arm/npcm7xx_boards.c
@@ -328,6 +328,31 @@ static void quanta_gbs_i2c_init(NPCM7xxState *soc)
*/
}
+static void kudo_bmc_i2c_init(NPCM7xxState *soc)
+{
+ I2CSlave *i2c_mux;
+
+ i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x75);
+ i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x77);
+
+ i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 4), TYPE_PCA9548, 0x77);
+
+ at24c_eeprom_init(soc, 4, 0x50, 8192); /* mbfru */
+
+ i2c_mux = i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 13),
+ TYPE_PCA9548, 0x77);
+
+ /* tmp105 is compatible with the lm75 */
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 2), "tmp105", 0x48);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 3), "tmp105", 0x49);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 4), "tmp105", 0x48);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 5), "tmp105", 0x49);
+
+ at24c_eeprom_init(soc, 14, 0x55, 8192); /* bmcfru */
+
+ /* TODO: Add remaining i2c devices. */
+}
+
static void npcm750_evb_init(MachineState *machine)
{
NPCM7xxState *soc;
@@ -391,6 +416,8 @@ static void kudo_bmc_init(MachineState *machine)
npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f",
drive_get(IF_MTD, 3, 0));
+ kudo_bmc_i2c_init(soc);
+ sdhci_attach_drive(&soc->mmc.sdhci, 0);
npcm7xx_load_kernel(machine, soc);
}
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index d0f4867fdf..f2514ce77c 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -229,6 +229,7 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms)
Aml *dev = aml_device("TPM0");
aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device")));
aml_append(dev, aml_name_decl("_UID", aml_int(0)));
Aml *crs = aml_resource_template();
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 6bce595aba..b45b52c90e 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1589,7 +1589,7 @@ static void virt_build_smbios(VirtMachineState *vms)
smbios_set_defaults("QEMU", product,
vmc->smbios_old_sys_ver ? "1.0" : mc->name, false,
- true, SMBIOS_ENTRY_POINT_30);
+ true, SMBIOS_ENTRY_POINT_TYPE_64);
smbios_get_tables(MACHINE(vms), NULL, 0,
&smbios_tables, &smbios_tables_len,
@@ -2856,10 +2856,17 @@ static void machvirt_machine_init(void)
}
type_init(machvirt_machine_init);
+static void virt_machine_7_0_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(7, 0)
+
static void virt_machine_6_2_options(MachineClass *mc)
{
+ virt_machine_7_0_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_VIRT_MACHINE_AS_LATEST(6, 2)
+DEFINE_VIRT_MACHINE(6, 2)
static void virt_machine_6_1_options(MachineClass *mc)
{
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index ba13cb87e5..1a42ae9187 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -100,7 +100,7 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
&local_err);
if (ret < 0) {
error_report_err(local_err);
- return -1;
+ return ret;
}
/* valid for resize only */
@@ -252,6 +252,7 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
VHostUserBlk *s = VHOST_USER_BLK(vdev);
/* Turn on pre-defined features */
+ virtio_add_feature(&features, VIRTIO_BLK_F_SIZE_MAX);
virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
@@ -511,7 +512,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
*errp = NULL;
}
ret = vhost_user_blk_realize_connect(s, errp);
- } while (ret == -EPROTO && retries--);
+ } while (ret < 0 && retries--);
if (ret < 0) {
goto virtio_err;
diff --git a/hw/core/machine.c b/hw/core/machine.c
index a4a2df405f..debcdc0e70 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -37,6 +37,9 @@
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-pci.h"
+GlobalProperty hw_compat_6_2[] = {};
+const size_t hw_compat_6_2_len = G_N_ELEMENTS(hw_compat_6_2);
+
GlobalProperty hw_compat_6_1[] = {
{ "vhost-user-vsock-device", "seqpacket", "off" },
{ "nvme-ns", "shared", "off" },
diff --git a/hw/display/macfb.c b/hw/display/macfb.c
index 277d3e6633..4bd7c3ad6a 100644
--- a/hw/display/macfb.c
+++ b/hw/display/macfb.c
@@ -661,9 +661,9 @@ static bool macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp)
memory_region_init_ram(&s->mem_vram, OBJECT(dev), "macfb-vram",
MACFB_VRAM_SIZE, &error_abort);
+ memory_region_set_log(&s->mem_vram, true, DIRTY_MEMORY_VGA);
s->vram = memory_region_get_ram_ptr(&s->mem_vram);
s->vram_bit_mask = MACFB_VRAM_SIZE - 1;
- memory_region_set_coalescing(&s->mem_vram);
s->vbl_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, macfb_vbl_timer, s);
macfb_update_mode(s);
diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c
index 85fe34f5f3..1dd88f3479 100644
--- a/hw/dma/sifive_pdma.c
+++ b/hw/dma/sifive_pdma.c
@@ -177,18 +177,44 @@ static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
s->chan[ch].state = DMA_CHAN_STATE_IDLE;
}
-static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
+static uint64_t sifive_pdma_readq(SiFivePDMAState *s, int ch, hwaddr offset)
{
- SiFivePDMAState *s = opaque;
- int ch = SIFIVE_PDMA_CHAN_NO(offset);
uint64_t val = 0;
- if (ch >= SIFIVE_PDMA_CHANS) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
- __func__, ch);
- return 0;
+ offset &= 0xfff;
+ switch (offset) {
+ case DMA_NEXT_BYTES:
+ val = s->chan[ch].next_bytes;
+ break;
+ case DMA_NEXT_DST:
+ val = s->chan[ch].next_dst;
+ break;
+ case DMA_NEXT_SRC:
+ val = s->chan[ch].next_src;
+ break;
+ case DMA_EXEC_BYTES:
+ val = s->chan[ch].exec_bytes;
+ break;
+ case DMA_EXEC_DST:
+ val = s->chan[ch].exec_dst;
+ break;
+ case DMA_EXEC_SRC:
+ val = s->chan[ch].exec_src;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ break;
}
+ return val;
+}
+
+static uint32_t sifive_pdma_readl(SiFivePDMAState *s, int ch, hwaddr offset)
+{
+ uint32_t val = 0;
+
offset &= 0xfff;
switch (offset) {
case DMA_CONTROL:
@@ -198,28 +224,47 @@ static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
val = s->chan[ch].next_config;
break;
case DMA_NEXT_BYTES:
- val = s->chan[ch].next_bytes;
+ val = extract64(s->chan[ch].next_bytes, 0, 32);
+ break;
+ case DMA_NEXT_BYTES + 4:
+ val = extract64(s->chan[ch].next_bytes, 32, 32);
break;
case DMA_NEXT_DST:
- val = s->chan[ch].next_dst;
+ val = extract64(s->chan[ch].next_dst, 0, 32);
+ break;
+ case DMA_NEXT_DST + 4:
+ val = extract64(s->chan[ch].next_dst, 32, 32);
break;
case DMA_NEXT_SRC:
- val = s->chan[ch].next_src;
+ val = extract64(s->chan[ch].next_src, 0, 32);
+ break;
+ case DMA_NEXT_SRC + 4:
+ val = extract64(s->chan[ch].next_src, 32, 32);
break;
case DMA_EXEC_CONFIG:
val = s->chan[ch].exec_config;
break;
case DMA_EXEC_BYTES:
- val = s->chan[ch].exec_bytes;
+ val = extract64(s->chan[ch].exec_bytes, 0, 32);
+ break;
+ case DMA_EXEC_BYTES + 4:
+ val = extract64(s->chan[ch].exec_bytes, 32, 32);
break;
case DMA_EXEC_DST:
- val = s->chan[ch].exec_dst;
+ val = extract64(s->chan[ch].exec_dst, 0, 32);
+ break;
+ case DMA_EXEC_DST + 4:
+ val = extract64(s->chan[ch].exec_dst, 32, 32);
break;
case DMA_EXEC_SRC:
- val = s->chan[ch].exec_src;
+ val = extract64(s->chan[ch].exec_src, 0, 32);
+ break;
+ case DMA_EXEC_SRC + 4:
+ val = extract64(s->chan[ch].exec_src, 32, 32);
break;
default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
__func__, offset);
break;
}
@@ -227,19 +272,66 @@ static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
return val;
}
-static void sifive_pdma_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
+static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
{
SiFivePDMAState *s = opaque;
int ch = SIFIVE_PDMA_CHAN_NO(offset);
- bool claimed, run;
+ uint64_t val = 0;
if (ch >= SIFIVE_PDMA_CHANS) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
__func__, ch);
- return;
+ return 0;
+ }
+
+ switch (size) {
+ case 8:
+ val = sifive_pdma_readq(s, ch, offset);
+ break;
+ case 4:
+ val = sifive_pdma_readl(s, ch, offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid read size %u to PDMA\n",
+ __func__, size);
+ return 0;
}
+ return val;
+}
+
+static void sifive_pdma_writeq(SiFivePDMAState *s, int ch,
+ hwaddr offset, uint64_t value)
+{
+ offset &= 0xfff;
+ switch (offset) {
+ case DMA_NEXT_BYTES:
+ s->chan[ch].next_bytes = value;
+ break;
+ case DMA_NEXT_DST:
+ s->chan[ch].next_dst = value;
+ break;
+ case DMA_NEXT_SRC:
+ s->chan[ch].next_src = value;
+ break;
+ case DMA_EXEC_BYTES:
+ case DMA_EXEC_DST:
+ case DMA_EXEC_SRC:
+ /* these are read-only registers */
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ break;
+ }
+}
+
+static void sifive_pdma_writel(SiFivePDMAState *s, int ch,
+ hwaddr offset, uint32_t value)
+{
+ bool claimed, run;
+
offset &= 0xfff;
switch (offset) {
case DMA_CONTROL:
@@ -282,27 +374,68 @@ static void sifive_pdma_write(void *opaque, hwaddr offset,
s->chan[ch].next_config = value;
break;
case DMA_NEXT_BYTES:
- s->chan[ch].next_bytes = value;
+ s->chan[ch].next_bytes =
+ deposit64(s->chan[ch].next_bytes, 0, 32, value);
+ break;
+ case DMA_NEXT_BYTES + 4:
+ s->chan[ch].next_bytes =
+ deposit64(s->chan[ch].next_bytes, 32, 32, value);
break;
case DMA_NEXT_DST:
- s->chan[ch].next_dst = value;
+ s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 0, 32, value);
+ break;
+ case DMA_NEXT_DST + 4:
+ s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 32, 32, value);
break;
case DMA_NEXT_SRC:
- s->chan[ch].next_src = value;
+ s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 0, 32, value);
+ break;
+ case DMA_NEXT_SRC + 4:
+ s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 32, 32, value);
break;
case DMA_EXEC_CONFIG:
case DMA_EXEC_BYTES:
+ case DMA_EXEC_BYTES + 4:
case DMA_EXEC_DST:
+ case DMA_EXEC_DST + 4:
case DMA_EXEC_SRC:
+ case DMA_EXEC_SRC + 4:
/* these are read-only registers */
break;
default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
__func__, offset);
break;
}
}
+static void sifive_pdma_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ SiFivePDMAState *s = opaque;
+ int ch = SIFIVE_PDMA_CHAN_NO(offset);
+
+ if (ch >= SIFIVE_PDMA_CHANS) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
+ __func__, ch);
+ return;
+ }
+
+ switch (size) {
+ case 8:
+ sifive_pdma_writeq(s, ch, offset, value);
+ break;
+ case 4:
+ sifive_pdma_writel(s, ch, offset, (uint32_t) value);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid write size %u to PDMA\n",
+ __func__, size);
+ break;
+ }
+}
+
static const MemoryRegionOps sifive_pdma_ops = {
.read = sifive_pdma_read,
.write = sifive_pdma_write,
@@ -311,6 +444,10 @@ static const MemoryRegionOps sifive_pdma_ops = {
.impl = {
.min_access_size = 4,
.max_access_size = 8,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
}
};
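The PDMA rework splits register access by size: 64-bit accesses touch the whole channel register, while 32-bit accesses read or update one half of it via extract64()/deposit64(), and the MemoryRegionOps now advertises 4- and 8-byte sizes for both .impl and .valid. A standalone sketch of the halves handling; the two helpers are local stand-ins written to mirror the bitops semantics, not the QEMU functions themselves::

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins mirroring extract64()/deposit64(): get or replace a bitfield. */
    static uint64_t demo_extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    static uint64_t demo_deposit64(uint64_t value, int start, int length,
                                   uint64_t field)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint64_t next_dst = 0;

        /* A guest writing the 64-bit register as two 32-bit halves... */
        next_dst = demo_deposit64(next_dst, 0, 32, 0x90000000u);   /* DMA_NEXT_DST     */
        next_dst = demo_deposit64(next_dst, 32, 32, 0x00000001u);  /* DMA_NEXT_DST + 4 */

        /* ...ends up with the same value a single 64-bit write would produce. */
        printf("next_dst  = 0x%016llx\n", (unsigned long long)next_dst);
        printf("low half  = 0x%08llx\n",
               (unsigned long long)demo_extract64(next_dst, 0, 32));
        printf("high half = 0x%08llx\n",
               (unsigned long long)demo_extract64(next_dst, 32, 32));
        return 0;
    }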
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 8383b83ee3..ce823e8fcb 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -1812,11 +1812,15 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
dev = aml_device("TPM");
aml_append(dev, aml_name_decl("_HID",
aml_string("MSFT0101")));
+ aml_append(dev,
+ aml_name_decl("_STR",
+ aml_string("TPM 2.0 Device")));
} else {
dev = aml_device("ISA.TPM");
aml_append(dev, aml_name_decl("_HID",
aml_eisaid("PNP0C31")));
}
+ aml_append(dev, aml_name_decl("_UID", aml_int(1)));
aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
crs = aml_resource_template();
@@ -1844,12 +1848,15 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
if (TPM_IS_CRB(tpm)) {
dev = aml_device("TPM");
aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ aml_append(dev, aml_name_decl("_STR",
+ aml_string("TPM 2.0 Device")));
crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(TPM_CRB_ADDR_BASE,
TPM_CRB_ADDR_SIZE, AML_READ_WRITE));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(dev, aml_name_decl("_STA", aml_int(0xf)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(1)));
tpm_build_ppi_acpi(tpm, dev);
@@ -2723,6 +2730,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
/* Cleanup memory that's no longer used. */
g_array_free(table_offsets, true);
+ g_free(slic_oem.id);
+ g_free(slic_oem.table_id);
}
static void acpi_ram_update(MemoryRegion *mr, GArray *data)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 5b865ac08c..4c6c016388 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -1516,11 +1516,29 @@ static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
* 1st-level translation or 2nd-level translation, it depends
* on PGTT setting.
*/
-static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
+static bool vtd_dev_pt_enabled(IntelIOMMUState *s, VTDContextEntry *ce)
+{
+ VTDPASIDEntry pe;
+ int ret;
+
+ if (s->root_scalable) {
+ ret = vtd_ce_get_rid2pasid_entry(s, ce, &pe);
+ if (ret) {
+ error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32,
+ __func__, ret);
+ return false;
+ }
+ return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
+ }
+
+ return (vtd_ce_get_type(ce) == VTD_CONTEXT_TT_PASS_THROUGH);
+
+}
+
+static bool vtd_as_pt_enabled(VTDAddressSpace *as)
{
IntelIOMMUState *s;
VTDContextEntry ce;
- VTDPASIDEntry pe;
int ret;
assert(as);
@@ -1538,17 +1556,7 @@ static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
return false;
}
- if (s->root_scalable) {
- ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe);
- if (ret) {
- error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32,
- __func__, ret);
- return false;
- }
- return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
- }
-
- return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH);
+ return vtd_dev_pt_enabled(s, &ce);
}
/* Return whether the device is using IOMMU translation. */
@@ -1560,7 +1568,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
assert(as);
- use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);
+ use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as);
trace_vtd_switch_address_space(pci_bus_num(as->bus),
VTD_PCI_SLOT(as->devfn),
@@ -1753,7 +1761,7 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
* We don't need to translate for pass-through context entries.
* Also, let's ignore IOTLB caching as well for PT devices.
*/
- if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
+ if (vtd_dev_pt_enabled(s, &ce)) {
entry->iova = addr & VTD_PAGE_MASK_4K;
entry->translated_addr = entry->iova;
entry->addr_mask = ~VTD_PAGE_MASK_4K;
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index a2ef40ecbc..c8696ac01e 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -77,6 +77,7 @@
#include "hw/mem/nvdimm.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-common.h"
+#include "qapi/qapi-visit-machine.h"
#include "qapi/visitor.h"
#include "hw/core/cpu.h"
#include "hw/usb.h"
@@ -94,6 +95,11 @@
#include "trace.h"
#include CONFIG_DEVICES
+GlobalProperty pc_compat_6_2[] = {
+ { "virtio-mem", "unplugged-inaccessible", "off" },
+};
+const size_t pc_compat_6_2_len = G_N_ELEMENTS(pc_compat_6_2);
+
GlobalProperty pc_compat_6_1[] = {
{ TYPE_X86_CPU, "hv-version-id-build", "0x1bbc" },
{ TYPE_X86_CPU, "hv-version-id-major", "0x0006" },
@@ -1521,6 +1527,23 @@ static void pc_machine_set_default_bus_bypass_iommu(Object *obj, bool value,
pcms->default_bus_bypass_iommu = value;
}
+static void pc_machine_get_smbios_ep(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+ SmbiosEntryPointType smbios_entry_point_type = pcms->smbios_entry_point_type;
+
+ visit_type_SmbiosEntryPointType(v, name, &smbios_entry_point_type, errp);
+}
+
+static void pc_machine_set_smbios_ep(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+
+ visit_type_SmbiosEntryPointType(v, name, &pcms->smbios_entry_point_type, errp);
+}
+
static void pc_machine_get_max_ram_below_4g(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
@@ -1611,6 +1634,8 @@ static void pc_machine_initfn(Object *obj)
pcms->vmport = ON_OFF_AUTO_OFF;
#endif /* CONFIG_VMPORT */
pcms->max_ram_below_4g = 0; /* use default */
+ pcms->smbios_entry_point_type = SMBIOS_ENTRY_POINT_TYPE_32;
+
/* acpi build is enabled by default if machine supports it */
pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build;
pcms->smbus_enabled = true;
@@ -1734,15 +1759,23 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
object_class_property_add_bool(oc, PC_MACHINE_SMBUS,
pc_machine_get_smbus, pc_machine_set_smbus);
+ object_class_property_set_description(oc, PC_MACHINE_SMBUS,
+ "Enable/disable system management bus");
object_class_property_add_bool(oc, PC_MACHINE_SATA,
pc_machine_get_sata, pc_machine_set_sata);
+ object_class_property_set_description(oc, PC_MACHINE_SATA,
+ "Enable/disable Serial ATA bus");
object_class_property_add_bool(oc, PC_MACHINE_PIT,
pc_machine_get_pit, pc_machine_set_pit);
+ object_class_property_set_description(oc, PC_MACHINE_PIT,
+ "Enable/disable Intel 8254 programmable interval timer emulation");
object_class_property_add_bool(oc, "hpet",
pc_machine_get_hpet, pc_machine_set_hpet);
+ object_class_property_set_description(oc, "hpet",
+ "Enable/disable high precision event timer emulation");
object_class_property_add_bool(oc, "default-bus-bypass-iommu",
pc_machine_get_default_bus_bypass_iommu,
@@ -1753,6 +1786,12 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
NULL, NULL);
object_class_property_set_description(oc, PC_MACHINE_MAX_FW_SIZE,
"Maximum combined firmware size");
+
+ object_class_property_add(oc, PC_MACHINE_SMBIOS_EP, "str",
+ pc_machine_get_smbios_ep, pc_machine_set_smbios_ep,
+ NULL, NULL);
+ object_class_property_set_description(oc, PC_MACHINE_SMBIOS_EP,
+ "SMBIOS Entry Point type [32, 64]");
}
static const TypeInfo pc_machine_info = {
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 223dd3e05d..7c7790a5ce 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -177,7 +177,7 @@ static void pc_init1(MachineState *machine,
smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)",
mc->name, pcmc->smbios_legacy_mode,
pcmc->smbios_uuid_encoded,
- SMBIOS_ENTRY_POINT_21);
+ pcms->smbios_entry_point_type);
}
/* allocate ram and load rom/bios */
@@ -413,7 +413,7 @@ static void pc_i440fx_machine_options(MachineClass *m)
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
}
-static void pc_i440fx_6_2_machine_options(MachineClass *m)
+static void pc_i440fx_7_0_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_machine_options(m);
@@ -422,6 +422,18 @@ static void pc_i440fx_6_2_machine_options(MachineClass *m)
pcmc->default_cpu_version = 1;
}
+DEFINE_I440FX_MACHINE(v7_0, "pc-i440fx-7.0", NULL,
+ pc_i440fx_7_0_machine_options);
+
+static void pc_i440fx_6_2_machine_options(MachineClass *m)
+{
+ pc_i440fx_7_0_machine_options(m);
+ m->alias = NULL;
+ m->is_default = false;
+ compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len);
+ compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len);
+}
+
DEFINE_I440FX_MACHINE(v6_2, "pc-i440fx-6.2", NULL,
pc_i440fx_6_2_machine_options);
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index e1e100316d..1780f79bc1 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -200,7 +200,7 @@ static void pc_q35_init(MachineState *machine)
smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)",
mc->name, pcmc->smbios_legacy_mode,
pcmc->smbios_uuid_encoded,
- SMBIOS_ENTRY_POINT_21);
+ pcms->smbios_entry_point_type);
}
/* allocate ram and load rom/bios */
@@ -360,7 +360,7 @@ static void pc_q35_machine_options(MachineClass *m)
m->max_cpus = 288;
}
-static void pc_q35_6_2_machine_options(MachineClass *m)
+static void pc_q35_7_0_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_machine_options(m);
@@ -368,6 +368,17 @@ static void pc_q35_6_2_machine_options(MachineClass *m)
pcmc->default_cpu_version = 1;
}
+DEFINE_Q35_MACHINE(v7_0, "pc-q35-7.0", NULL,
+ pc_q35_7_0_machine_options);
+
+static void pc_q35_6_2_machine_options(MachineClass *m)
+{
+ pc_q35_7_0_machine_options(m);
+ m->alias = NULL;
+ compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len);
+ compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len);
+}
+
DEFINE_Q35_MACHINE(v6_2, "pc-q35-6.2", NULL,
pc_q35_6_2_machine_options);
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index b99e63d58f..fa3cdb5755 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -74,7 +74,7 @@ static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
uint64_t value;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
if (s->ct.indirect) {
l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
@@ -88,12 +88,12 @@ static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
*cte = address_space_ldq_le(as, l2t_addr +
- ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ ((icid % num_l2_entries) * GITS_CTE_SIZE),
MEMTXATTRS_UNSPECIFIED, res);
}
}
@@ -104,7 +104,7 @@ static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
MEMTXATTRS_UNSPECIFIED, res);
}
- return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
+ return FIELD_EX64(*cte, CTE, VALID);
}
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
@@ -114,7 +114,7 @@ static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
uint64_t itt_addr;
MemTxResult res = MEMTX_OK;
- itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
@@ -141,7 +141,7 @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
bool status = false;
IteEntry ite = {};
- itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
ite.itel = address_space_ldq_le(as, itt_addr +
@@ -156,12 +156,11 @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
MEMTXATTRS_UNSPECIFIED, res);
if (*res == MEMTX_OK) {
- if (ite.itel & TABLE_ENTRY_VALID_MASK) {
- if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
- GITS_TYPE_PHYSICAL) {
- *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
- ITE_ENTRY_INTID_SHIFT;
- *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
+ if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
+ int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
+ if (inttype == ITE_INTTYPE_PHYSICAL) {
+ *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
+ *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
status = true;
}
}
@@ -177,7 +176,7 @@ static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
uint64_t value;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
if (s->dt.indirect) {
l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
@@ -191,12 +190,12 @@ static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+ num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
value = address_space_ldq_le(as, l2t_addr +
- ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ ((devid % num_l2_entries) * GITS_DTE_SIZE),
MEMTXATTRS_UNSPECIFIED, res);
}
}
@@ -256,10 +255,10 @@ static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
if (res != MEMTX_OK) {
return result;
}
- dte_valid = dte & TABLE_ENTRY_VALID_MASK;
+ dte_valid = FIELD_EX64(dte, DTE, VALID);
if (dte_valid) {
- max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
+ max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
@@ -287,10 +286,10 @@ static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
* In this implementation, in case of guest errors we ignore the
* command and move onto the next command in the queue.
*/
- if (devid > s->dt.maxids.max_devids) {
+ if (devid >= s->dt.num_ids) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: devid %d>%d",
- __func__, devid, s->dt.maxids.max_devids);
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_ids);
} else if (!dte_valid || !ite_valid || !cte_valid) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -309,9 +308,9 @@ static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
* Current implementation only supports rdbase == procnum
* Hence rdbase physical address is ignored
*/
- rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
+ rdbase = FIELD_EX64(cte, CTE, RDBASE);
- if (rdbase > s->gicv3->num_cpu) {
+ if (rdbase >= s->gicv3->num_cpu) {
return result;
}
@@ -342,8 +341,6 @@ static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
MemTxResult res = MEMTX_OK;
uint16_t icid = 0;
uint64_t dte = 0;
- IteEntry ite;
- uint32_t int_spurious = INTID_SPURIOUS;
bool result = false;
devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
@@ -357,7 +354,9 @@ static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
eventid = (value & EVENTID_MASK);
- if (!ignore_pInt) {
+ if (ignore_pInt) {
+ pIntid = eventid;
+ } else {
pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
}
@@ -376,18 +375,14 @@ static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
if (res != MEMTX_OK) {
return result;
}
- dte_valid = dte & TABLE_ENTRY_VALID_MASK;
-
- max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
-
- if (!ignore_pInt) {
- max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
- }
+ dte_valid = FIELD_EX64(dte, DTE, VALID);
+ max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
+ max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
- if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
+ if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
|| !dte_valid || (eventid > max_eventid) ||
- (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
- (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
+ (((pIntid < GICV3_LPI_INTID_START) || (pIntid > max_Intid)) &&
+ (pIntid != INTID_SPURIOUS))) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid command attributes "
"devid %d or icid %d or eventid %d or pIntid %d or"
@@ -400,16 +395,12 @@ static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
*/
} else {
/* add ite entry to interrupt translation table */
- ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
- (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
-
- if (ignore_pInt) {
- ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
- } else {
- ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
- }
- ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
- ite.iteh = icid;
+ IteEntry ite = {};
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
+ ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
result = update_ite(s, eventid, dte, ite);
}
@@ -425,7 +416,7 @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
uint64_t l2t_addr;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
uint64_t cte = 0;
MemTxResult res = MEMTX_OK;
@@ -435,7 +426,8 @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
if (valid) {
/* add mapping entry to collection table */
- cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
+ cte = FIELD_DP64(cte, CTE, VALID, 1);
+ cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
}
/*
@@ -458,12 +450,12 @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
address_space_stq_le(as, l2t_addr +
- ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ ((icid % num_l2_entries) * GITS_CTE_SIZE),
cte, MEMTXATTRS_UNSPECIFIED, &res);
}
} else {
@@ -505,7 +497,7 @@ static bool process_mapc(GICv3ITSState *s, uint32_t offset)
valid = (value & CMD_FIELD_VALID_MASK);
- if ((icid > s->ct.maxids.max_collids) || (rdbase > s->gicv3->num_cpu)) {
+ if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
qemu_log_mask(LOG_GUEST_ERROR,
"ITS MAPC: invalid collection table attributes "
"icid %d rdbase %" PRIu64 "\n", icid, rdbase);
@@ -529,16 +521,16 @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
uint64_t l2t_addr;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
uint64_t dte = 0;
MemTxResult res = MEMTX_OK;
if (s->dt.valid) {
if (valid) {
/* add mapping entry to device table */
- dte = (valid & TABLE_ENTRY_VALID_MASK) |
- ((size & SIZE_MASK) << 1U) |
- (itt_addr << GITS_DTE_ITTADDR_SHIFT);
+ dte = FIELD_DP64(dte, DTE, VALID, 1);
+ dte = FIELD_DP64(dte, DTE, SIZE, size);
+ dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
}
} else {
return true;
@@ -564,12 +556,12 @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+ num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
address_space_stq_le(as, l2t_addr +
- ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ ((devid % num_l2_entries) * GITS_DTE_SIZE),
dte, MEMTXATTRS_UNSPECIFIED, &res);
}
} else {
@@ -618,7 +610,7 @@ static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
valid = (value & CMD_FIELD_VALID_MASK);
- if ((devid > s->dt.maxids.max_devids) ||
+ if ((devid >= s->dt.num_ids) ||
(size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
qemu_log_mask(LOG_GUEST_ERROR,
"ITS MAPD: invalid device table attributes "
@@ -651,13 +643,13 @@ static void process_cmdq(GICv3ITSState *s)
uint8_t cmd;
int i;
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
return;
}
wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
- if (wr_offset > s->cq.max_entries) {
+ if (wr_offset >= s->cq.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid write offset "
"%d\n", __func__, wr_offset);
@@ -666,7 +658,7 @@ static void process_cmdq(GICv3ITSState *s)
rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
- if (rd_offset > s->cq.max_entries) {
+ if (rd_offset >= s->cq.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid read offset "
"%d\n", __func__, rd_offset);
@@ -729,7 +721,7 @@ static void process_cmdq(GICv3ITSState *s)
}
if (result) {
rd_offset++;
- rd_offset %= s->cq.max_entries;
+ rd_offset %= s->cq.num_entries;
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
} else {
/*
@@ -758,6 +750,9 @@ static void extract_table_params(GICv3ITSState *s)
uint64_t value;
for (int i = 0; i < 8; i++) {
+ TableDesc *td;
+ int idbits;
+
value = s->baser[i];
if (!value) {
@@ -789,73 +784,53 @@ static void extract_table_params(GICv3ITSState *s)
type = FIELD_EX64(value, GITS_BASER, TYPE);
switch (type) {
-
case GITS_BASER_TYPE_DEVICE:
- memset(&s->dt, 0 , sizeof(s->dt));
- s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);
-
- if (!s->dt.valid) {
- return;
- }
-
- s->dt.page_sz = page_sz;
- s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
- s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
-
- if (!s->dt.indirect) {
- s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
- } else {
- s->dt.max_entries = (((num_pages * page_sz) /
- L1TABLE_ENTRY_SIZE) *
- (page_sz / s->dt.entry_sz));
- }
-
- s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
- DEVBITS) + 1));
-
- s->dt.base_addr = baser_base_addr(value, page_sz);
-
+ td = &s->dt;
+ idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
break;
-
case GITS_BASER_TYPE_COLLECTION:
- memset(&s->ct, 0 , sizeof(s->ct));
- s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);
-
- /*
- * GITS_TYPER.HCC is 0 for this implementation
- * hence writes are discarded if ct.valid is 0
- */
- if (!s->ct.valid) {
- return;
- }
-
- s->ct.page_sz = page_sz;
- s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
- s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
-
- if (!s->ct.indirect) {
- s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
- } else {
- s->ct.max_entries = (((num_pages * page_sz) /
- L1TABLE_ENTRY_SIZE) *
- (page_sz / s->ct.entry_sz));
- }
-
+ td = &s->ct;
if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
- s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
- GITS_TYPER, CIDBITS) + 1));
+ idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
} else {
/* 16-bit CollectionId supported when CIL == 0 */
- s->ct.maxids.max_collids = (1UL << 16);
+ idbits = 16;
}
-
- s->ct.base_addr = baser_base_addr(value, page_sz);
-
break;
-
default:
- break;
+ /*
+ * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
+ * ensures we will only see type values corresponding to
+ * the values set up in gicv3_its_reset().
+ */
+ g_assert_not_reached();
+ }
+
+ memset(td, 0, sizeof(*td));
+ td->valid = FIELD_EX64(value, GITS_BASER, VALID);
+ /*
+ * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
+ * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
+ * do not have a special case where the GITS_BASER<n>.Valid bit is 0
+ * for the register corresponding to the Collection table but we
+ * still have to process interrupts using non-memory-backed
+ * Collection table entries.)
+ */
+ if (!td->valid) {
+ continue;
+ }
+ td->page_sz = page_sz;
+ td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
+ td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
+ td->base_addr = baser_base_addr(value, page_sz);
+ if (!td->indirect) {
+ td->num_entries = (num_pages * page_sz) / td->entry_sz;
+ } else {
+ td->num_entries = (((num_pages * page_sz) /
+ L1TABLE_ENTRY_SIZE) *
+ (page_sz / td->entry_sz));
}
+ td->num_ids = 1ULL << idbits;
}
}
@@ -870,7 +845,7 @@ static void extract_cmdq_params(GICv3ITSState *s)
s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
if (s->cq.valid) {
- s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
+ s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
GITS_CMDQ_ENTRY_SIZE;
s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
@@ -887,7 +862,7 @@ static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
switch (offset) {
case GITS_TRANSLATER:
- if (s->ctlr & ITS_CTLR_ENABLED) {
+ if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
devid = attrs.requester_id;
result = process_its_cmd(s, data, devid, NONE);
}
@@ -912,13 +887,13 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
switch (offset) {
case GITS_CTLR:
if (value & R_GITS_CTLR_ENABLED_MASK) {
- s->ctlr |= ITS_CTLR_ENABLED;
+ s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
extract_table_params(s);
extract_cmdq_params(s);
s->creadr = 0;
process_cmdq(s);
} else {
- s->ctlr &= ~ITS_CTLR_ENABLED;
+ s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
}
break;
case GITS_CBASER:
@@ -926,7 +901,7 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
* IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 0, 32, value);
s->creadr = 0;
s->cwriter = s->creadr;
@@ -937,7 +912,7 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
* IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 32, 32, value);
s->creadr = 0;
s->cwriter = s->creadr;
@@ -979,7 +954,7 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
* IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
if (offset & 7) {
@@ -1076,7 +1051,7 @@ static bool its_writell(GICv3ITSState *s, hwaddr offset,
* IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
s->baser[index] &= GITS_BASER_RO_MASK;
s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
@@ -1087,7 +1062,7 @@ static bool its_writell(GICv3ITSState *s, hwaddr offset,
* IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = value;
s->creadr = 0;
s->cwriter = s->creadr;
@@ -1254,8 +1229,7 @@ static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
"gicv3-its-sysmem");
/* set the ITS default features supported */
- s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
- GITS_TYPE_PHYSICAL);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
ITS_ITT_ENTRY_SIZE - 1);
s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
@@ -1298,7 +1272,7 @@ static void gicv3_its_reset(DeviceState *dev)
static void gicv3_its_post_load(GICv3ITSState *s)
{
- if (s->ctlr & ITS_CTLR_ENABLED) {
+ if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
extract_table_params(s);
extract_cmdq_params(s);
}
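
The recurring change in the ITS hunks above is a shift from storing inclusive maxima (max_devids, max_collids, max_entries) to storing counts (num_ids, num_entries), with the guards rewritten from "id > max" to "id >= num". A minimal standalone sketch of that bound convention, using made-up names, is:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: a table sized for num_ids entries accepts IDs
     * 0 .. num_ids - 1, so the reject test is "id >= num_ids". */
    static bool id_in_range(uint64_t id, uint64_t num_ids)
    {
        return id < num_ids;
    }

    int main(void)
    {
        int idbits = 16;                     /* e.g. GITS_TYPER.DEVBITS + 1 */
        uint64_t num_ids = 1ULL << idbits;   /* what td->num_ids now holds */

        printf("id 65535 accepted: %d\n", id_in_range(65535, num_ids));
        printf("id 65536 accepted: %d\n", id_in_range(65536, num_ids));
        return 0;
    }
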
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
index b9c37453b0..1eeb99035d 100644
--- a/hw/intc/gicv3_internal.h
+++ b/hw/intc/gicv3_internal.h
@@ -289,8 +289,6 @@ FIELD(GITS_TYPER, CIL, 36, 1)
#define GITS_IDREGS 0xFFD0
-#define ITS_CTLR_ENABLED (1U) /* ITS Enabled */
-
#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
R_GITS_BASER_TYPE_MASK)
@@ -356,28 +354,30 @@ FIELD(MAPC, RDBASE, 16, 32)
#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
-/**
- * Default features advertised by this version of ITS
- */
-/* Physical LPIs supported */
-#define GITS_TYPE_PHYSICAL (1U << 0)
-
/*
* 12 bytes Interrupt translation Table Entry size
* as per Table 5.3 in GICv3 spec
* ITE Lower 8 Bytes
* Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 |
- * Values: | 1023 | IntNum | IntType | Valid |
+ * Values: | Doorbell | IntNum | IntType | Valid |
* ITE Higher 4 Bytes
* Bits: | 31 ... 16 | 15 ...0 |
* Values: | vPEID | ICID |
+ * (When Doorbell is unused, as it always is in GICv3, it is 1023)
*/
#define ITS_ITT_ENTRY_SIZE 0xC
-#define ITE_ENTRY_INTTYPE_SHIFT 1
-#define ITE_ENTRY_INTID_SHIFT 2
-#define ITE_ENTRY_INTID_MASK MAKE_64BIT_MASK(2, 24)
-#define ITE_ENTRY_INTSP_SHIFT 26
-#define ITE_ENTRY_ICID_MASK MAKE_64BIT_MASK(0, 16)
+
+FIELD(ITE_L, VALID, 0, 1)
+FIELD(ITE_L, INTTYPE, 1, 1)
+FIELD(ITE_L, INTID, 2, 24)
+FIELD(ITE_L, DOORBELL, 26, 24)
+
+FIELD(ITE_H, ICID, 0, 16)
+FIELD(ITE_H, VPEID, 16, 16)
+
+/* Possible values for ITE_L INTTYPE */
+#define ITE_INTTYPE_VIRTUAL 0
+#define ITE_INTTYPE_PHYSICAL 1
/* 16 bits EventId */
#define ITS_IDBITS GICD_TYPER_IDBITS
@@ -393,16 +393,18 @@ FIELD(MAPC, RDBASE, 16, 32)
* Valid = 1 bit,ITTAddr = 44 bits,Size = 5 bits
*/
#define GITS_DTE_SIZE (0x8ULL)
-#define GITS_DTE_ITTADDR_SHIFT 6
-#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \
- ITTADDR_LENGTH)
+
+FIELD(DTE, VALID, 0, 1)
+FIELD(DTE, SIZE, 1, 5)
+FIELD(DTE, ITTADDR, 6, 44)
/*
* 8 bytes Collection Table Entry size
- * Valid = 1 bit,RDBase = 36 bits(considering max RDBASE)
+ * Valid = 1 bit, RDBase = 16 bits
*/
#define GITS_CTE_SIZE (0x8ULL)
-#define GITS_CTE_RDBASE_PROCNUM_MASK MAKE_64BIT_MASK(1, RDBASE_PROCNUM_LENGTH)
+FIELD(CTE, VALID, 0, 1)
+FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)
/* Special interrupt IDs */
#define INTID_SECURE 1020
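
The header changes above replace ad-hoc *_SHIFT/*_MASK constants with FIELD() declarations, so the .c file can use FIELD_EX64()/FIELD_DP64() to extract and deposit named fields. A self-contained sketch of that extract/deposit idea (the macros below are simplified stand-ins, not QEMU's actual registerfields.h definitions) looks like:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins; the real macros take a register/field name pair
     * declared with FIELD() and expand to the generated shift/length. */
    #define EX_FIELD(reg, shift, len) \
        (((reg) >> (shift)) & ((1ULL << (len)) - 1))
    #define DP_FIELD(reg, shift, len, val) \
        (((reg) & ~(((1ULL << (len)) - 1) << (shift))) | \
         (((uint64_t)(val) & ((1ULL << (len)) - 1)) << (shift)))

    int main(void)
    {
        uint64_t dte = 0;

        /* DTE layout from above: VALID at bit 0, SIZE at bits 1..5,
         * ITTADDR at bits 6..49 */
        dte = DP_FIELD(dte, 0, 1, 1);
        dte = DP_FIELD(dte, 1, 5, 0x0f);
        dte = DP_FIELD(dte, 6, 44, 0x12345);

        printf("valid=%llu size=%llu ittaddr=%#llx\n",
               (unsigned long long)EX_FIELD(dte, 0, 1),
               (unsigned long long)EX_FIELD(dte, 1, 5),
               (unsigned long long)EX_FIELD(dte, 6, 44));
        return 0;
    }
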
diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c
index 877e76877c..746c0f0343 100644
--- a/hw/intc/sifive_plic.c
+++ b/hw/intc/sifive_plic.c
@@ -31,7 +31,10 @@
#include "migration/vmstate.h"
#include "hw/irq.h"
-#define RISCV_DEBUG_PLIC 0
+static bool addr_between(uint32_t addr, uint32_t base, uint32_t num)
+{
+ return addr >= base && addr - base < num;
+}
static PLICMode char_to_mode(char c)
{
@@ -46,47 +49,6 @@ static PLICMode char_to_mode(char c)
}
}
-static char mode_to_char(PLICMode m)
-{
- switch (m) {
- case PLICMode_U: return 'U';
- case PLICMode_S: return 'S';
- case PLICMode_H: return 'H';
- case PLICMode_M: return 'M';
- default: return '?';
- }
-}
-
-static void sifive_plic_print_state(SiFivePLICState *plic)
-{
- int i;
- int addrid;
-
- /* pending */
- qemu_log("pending : ");
- for (i = plic->bitfield_words - 1; i >= 0; i--) {
- qemu_log("%08x", plic->pending[i]);
- }
- qemu_log("\n");
-
- /* pending */
- qemu_log("claimed : ");
- for (i = plic->bitfield_words - 1; i >= 0; i--) {
- qemu_log("%08x", plic->claimed[i]);
- }
- qemu_log("\n");
-
- for (addrid = 0; addrid < plic->num_addrs; addrid++) {
- qemu_log("hart%d-%c enable: ",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode));
- for (i = plic->bitfield_words - 1; i >= 0; i--) {
- qemu_log("%08x", plic->enable[addrid * plic->bitfield_words + i]);
- }
- qemu_log("\n");
- }
-}
-
static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value)
{
uint32_t old, new, cmp = qatomic_read(a);
@@ -110,26 +72,34 @@ static void sifive_plic_set_claimed(SiFivePLICState *plic, int irq, bool level)
atomic_set_masked(&plic->claimed[irq >> 5], 1 << (irq & 31), -!!level);
}
-static int sifive_plic_irqs_pending(SiFivePLICState *plic, uint32_t addrid)
+static uint32_t sifive_plic_claimed(SiFivePLICState *plic, uint32_t addrid)
{
+ uint32_t max_irq = 0;
+ uint32_t max_prio = plic->target_priority[addrid];
int i, j;
+
for (i = 0; i < plic->bitfield_words; i++) {
uint32_t pending_enabled_not_claimed =
- (plic->pending[i] & ~plic->claimed[i]) &
- plic->enable[addrid * plic->bitfield_words + i];
+ (plic->pending[i] & ~plic->claimed[i]) &
+ plic->enable[addrid * plic->bitfield_words + i];
+
if (!pending_enabled_not_claimed) {
continue;
}
+
for (j = 0; j < 32; j++) {
int irq = (i << 5) + j;
uint32_t prio = plic->source_priority[irq];
int enabled = pending_enabled_not_claimed & (1 << j);
- if (enabled && prio > plic->target_priority[addrid]) {
- return 1;
+
+ if (enabled && prio > max_prio) {
+ max_irq = irq;
+ max_prio = prio;
}
}
}
- return 0;
+
+ return max_irq;
}
static void sifive_plic_update(SiFivePLICState *plic)
@@ -140,7 +110,7 @@ static void sifive_plic_update(SiFivePLICState *plic)
for (addrid = 0; addrid < plic->num_addrs; addrid++) {
uint32_t hartid = plic->addr_config[addrid].hartid;
PLICMode mode = plic->addr_config[addrid].mode;
- int level = sifive_plic_irqs_pending(plic, addrid);
+ bool level = !!sifive_plic_claimed(plic, addrid);
switch (mode) {
case PLICMode_M:
@@ -153,111 +123,48 @@ static void sifive_plic_update(SiFivePLICState *plic)
break;
}
}
-
- if (RISCV_DEBUG_PLIC) {
- sifive_plic_print_state(plic);
- }
-}
-
-static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid)
-{
- int i, j;
- uint32_t max_irq = 0;
- uint32_t max_prio = plic->target_priority[addrid];
-
- for (i = 0; i < plic->bitfield_words; i++) {
- uint32_t pending_enabled_not_claimed =
- (plic->pending[i] & ~plic->claimed[i]) &
- plic->enable[addrid * plic->bitfield_words + i];
- if (!pending_enabled_not_claimed) {
- continue;
- }
- for (j = 0; j < 32; j++) {
- int irq = (i << 5) + j;
- uint32_t prio = plic->source_priority[irq];
- int enabled = pending_enabled_not_claimed & (1 << j);
- if (enabled && prio > max_prio) {
- max_irq = irq;
- max_prio = prio;
- }
- }
- }
-
- if (max_irq) {
- sifive_plic_set_pending(plic, max_irq, false);
- sifive_plic_set_claimed(plic, max_irq, true);
- }
- return max_irq;
}
static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size)
{
SiFivePLICState *plic = opaque;
- /* writes must be 4 byte words */
- if ((addr & 0x3) != 0) {
- goto err;
- }
-
- if (addr >= plic->priority_base && /* 4 bytes per source */
- addr < plic->priority_base + (plic->num_sources << 2))
- {
+ if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) {
uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read priority: irq=%d priority=%d\n",
- irq, plic->source_priority[irq]);
- }
+
return plic->source_priority[irq];
- } else if (addr >= plic->pending_base && /* 1 bit per source */
- addr < plic->pending_base + (plic->num_sources >> 3))
- {
+ } else if (addr_between(addr, plic->pending_base, plic->num_sources >> 3)) {
uint32_t word = (addr - plic->pending_base) >> 2;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read pending: word=%d value=%d\n",
- word, plic->pending[word]);
- }
+
return plic->pending[word];
- } else if (addr >= plic->enable_base && /* 1 bit per source */
- addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
- {
+ } else if (addr_between(addr, plic->enable_base,
+ plic->num_addrs * plic->enable_stride)) {
uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+
if (wordid < plic->bitfield_words) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read enable: hart%d-%c word=%d value=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode), wordid,
- plic->enable[addrid * plic->bitfield_words + wordid]);
- }
return plic->enable[addrid * plic->bitfield_words + wordid];
}
- } else if (addr >= plic->context_base && /* 1 bit per source */
- addr < plic->context_base + plic->num_addrs * plic->context_stride)
- {
+ } else if (addr_between(addr, plic->context_base,
+ plic->num_addrs * plic->context_stride)) {
uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
uint32_t contextid = (addr & (plic->context_stride - 1));
+
if (contextid == 0) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read priority: hart%d-%c priority=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- plic->target_priority[addrid]);
- }
return plic->target_priority[addrid];
} else if (contextid == 4) {
- uint32_t value = sifive_plic_claim(plic, addrid);
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read claim: hart%d-%c irq=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- value);
+ uint32_t max_irq = sifive_plic_claimed(plic, addrid);
+
+ if (max_irq) {
+ sifive_plic_set_pending(plic, max_irq, false);
+ sifive_plic_set_claimed(plic, max_irq, true);
}
+
sifive_plic_update(plic);
- return value;
+ return max_irq;
}
}
-err:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Invalid register read 0x%" HWADDR_PRIx "\n",
__func__, addr);
@@ -269,80 +176,53 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value,
{
SiFivePLICState *plic = opaque;
- /* writes must be 4 byte words */
- if ((addr & 0x3) != 0) {
- goto err;
- }
-
- if (addr >= plic->priority_base && /* 4 bytes per source */
- addr < plic->priority_base + (plic->num_sources << 2))
- {
+ if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) {
uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
+
plic->source_priority[irq] = value & 7;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write priority: irq=%d priority=%d\n",
- irq, plic->source_priority[irq]);
- }
sifive_plic_update(plic);
- return;
- } else if (addr >= plic->pending_base && /* 1 bit per source */
- addr < plic->pending_base + (plic->num_sources >> 3))
- {
+ } else if (addr_between(addr, plic->pending_base,
+ plic->num_sources >> 3)) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid pending write: 0x%" HWADDR_PRIx "",
__func__, addr);
- return;
- } else if (addr >= plic->enable_base && /* 1 bit per source */
- addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
- {
+ } else if (addr_between(addr, plic->enable_base,
+ plic->num_addrs * plic->enable_stride)) {
uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+
if (wordid < plic->bitfield_words) {
plic->enable[addrid * plic->bitfield_words + wordid] = value;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write enable: hart%d-%c word=%d value=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode), wordid,
- plic->enable[addrid * plic->bitfield_words + wordid]);
- }
- return;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid enable write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
}
- } else if (addr >= plic->context_base && /* 4 bytes per reg */
- addr < plic->context_base + plic->num_addrs * plic->context_stride)
- {
+ } else if (addr_between(addr, plic->context_base,
+ plic->num_addrs * plic->context_stride)) {
uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
uint32_t contextid = (addr & (plic->context_stride - 1));
+
if (contextid == 0) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write priority: hart%d-%c priority=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- plic->target_priority[addrid]);
- }
if (value <= plic->num_priorities) {
plic->target_priority[addrid] = value;
sifive_plic_update(plic);
}
- return;
} else if (contextid == 4) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write claim: hart%d-%c irq=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- (uint32_t)value);
- }
if (value < plic->num_sources) {
sifive_plic_set_claimed(plic, value, false);
sifive_plic_update(plic);
}
- return;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid context write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
}
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
}
-
-err:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
- __func__, addr);
}
static const MemoryRegionOps sifive_plic_ops = {
@@ -355,6 +235,23 @@ static const MemoryRegionOps sifive_plic_ops = {
}
};
+static void sifive_plic_reset(DeviceState *dev)
+{
+ SiFivePLICState *s = SIFIVE_PLIC(dev);
+ int i;
+
+ memset(s->source_priority, 0, sizeof(uint32_t) * s->num_sources);
+ memset(s->target_priority, 0, sizeof(uint32_t) * s->num_addrs);
+ memset(s->pending, 0, sizeof(uint32_t) * s->bitfield_words);
+ memset(s->claimed, 0, sizeof(uint32_t) * s->bitfield_words);
+ memset(s->enable, 0, sizeof(uint32_t) * s->num_enables);
+
+ for (i = 0; i < s->num_harts; i++) {
+ qemu_set_irq(s->m_external_irqs[i], 0);
+ qemu_set_irq(s->s_external_irqs[i], 0);
+ }
+}
+
/*
* parse PLIC hart/mode address offset config
*
@@ -501,6 +398,7 @@ static void sifive_plic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->reset = sifive_plic_reset;
device_class_set_props(dc, sifive_plic_properties);
dc->realize = sifive_plic_realize;
dc->vmsd = &vmstate_sifive_plic;
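
Two things change in the PLIC above: MMIO decoding now goes through a small addr_between() range helper instead of open-coded comparisons, and the duplicated pending/claim scan is folded into a single sifive_plic_claimed() routine reused by both the update and claim paths. A standalone sketch of the range helper, exercised against a made-up register window, is:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Mirror of the helper added above: compare the offset rather than
     * computing base + num, which could wrap for a badly sized window. */
    static bool addr_between(uint32_t addr, uint32_t base, uint32_t num)
    {
        return addr >= base && addr - base < num;
    }

    int main(void)
    {
        /* hypothetical window: 0x2000 .. 0x2fff */
        assert(addr_between(0x2000, 0x2000, 0x1000));
        assert(addr_between(0x2ffc, 0x2000, 0x1000));
        assert(!addr_between(0x3000, 0x2000, 0x1000));
        assert(!addr_between(0x1ffc, 0x2000, 0x1000));
        return 0;
    }
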
diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c
index e4c7c9b88a..55dfe5036f 100644
--- a/hw/m68k/q800.c
+++ b/hw/m68k/q800.c
@@ -672,12 +672,13 @@ static void q800_init(MachineState *machine)
/* Remove qtest_enabled() check once firmware files are in the tree */
if (!qtest_enabled()) {
- if (bios_size < 0 || bios_size > MACROM_SIZE) {
+ if (bios_size <= 0 || bios_size > MACROM_SIZE) {
error_report("could not load MacROM '%s'", bios_name);
exit(1);
}
- ptr = rom_ptr(MACROM_ADDR, MACROM_SIZE);
+ ptr = rom_ptr(MACROM_ADDR, bios_size);
+ assert(ptr != NULL);
stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */
stl_phys(cs->as, 4,
MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */
diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c
index 0efa4a45c7..78e926a554 100644
--- a/hw/m68k/virt.c
+++ b/hw/m68k/virt.c
@@ -304,10 +304,17 @@ type_init(virt_machine_register_types)
} \
type_init(machvirt_machine_##major##_##minor##_init);
+static void virt_machine_7_0_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE(7, 0, true)
+
static void virt_machine_6_2_options(MachineClass *mc)
{
+ virt_machine_7_0_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_VIRT_MACHINE(6, 2, true)
+DEFINE_VIRT_MACHINE(6, 2, false)
static void virt_machine_6_1_options(MachineClass *mc)
{
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index e5993c1ef5..5d30f9ca60 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -1390,7 +1390,7 @@ static void pci_update_mappings(PCIDevice *d)
/* now do the real mapping */
if (r->addr != PCI_BAR_UNMAPPED) {
- trace_pci_update_mappings_del(d, pci_dev_bus_num(d),
+ trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
PCI_SLOT(d->devfn),
PCI_FUNC(d->devfn),
i, r->addr, r->size);
@@ -1398,7 +1398,7 @@ static void pci_update_mappings(PCIDevice *d)
}
r->addr = new_addr;
if (r->addr != PCI_BAR_UNMAPPED) {
- trace_pci_update_mappings_add(d, pci_dev_bus_num(d),
+ trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
PCI_SLOT(d->devfn),
PCI_FUNC(d->devfn),
i, r->addr, r->size);
@@ -1497,11 +1497,6 @@ static void pci_irq_handler(void *opaque, int irq_num, int level)
pci_change_irq_level(pci_dev, irq_num, change);
}
-static inline int pci_intx(PCIDevice *pci_dev)
-{
- return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
-}
-
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
int intx = pci_intx(pci_dev);
diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c
index 7beafd40a8..eaf217ff55 100644
--- a/hw/pci/pci_host.c
+++ b/hw/pci/pci_host.c
@@ -79,7 +79,8 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
return;
}
- trace_pci_cfg_write(pci_dev->name, PCI_SLOT(pci_dev->devfn),
+ trace_pci_cfg_write(pci_dev->name, pci_dev_bus_num(pci_dev),
+ PCI_SLOT(pci_dev->devfn),
PCI_FUNC(pci_dev->devfn), addr, val);
pci_dev->config_write(pci_dev, addr, val, MIN(len, limit - addr));
}
@@ -104,7 +105,8 @@ uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr,
}
ret = pci_dev->config_read(pci_dev, addr, MIN(len, limit - addr));
- trace_pci_cfg_read(pci_dev->name, PCI_SLOT(pci_dev->devfn),
+ trace_pci_cfg_read(pci_dev->name, pci_dev_bus_num(pci_dev),
+ PCI_SLOT(pci_dev->devfn),
PCI_FUNC(pci_dev->devfn), addr, ret);
return ret;
diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
index 27f9cc56af..e1a8a88c8c 100644
--- a/hw/pci/pcie_aer.c
+++ b/hw/pci/pcie_aer.c
@@ -774,7 +774,9 @@ void pcie_aer_root_write_config(PCIDevice *dev,
uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
/* 6.2.4.1.2 Interrupt Generation */
if (!msix_enabled(dev) && !msi_enabled(dev)) {
- pci_set_irq(dev, !!(root_cmd & enabled_cmd));
+ if (pci_intx(dev) != -1) {
+ pci_set_irq(dev, !!(root_cmd & enabled_cmd));
+ }
return;
}
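
The AER hunk above only raises a legacy interrupt when pci_intx() reports a valid pin. Since pci_intx() is simply the Interrupt Pin config byte minus one (see the static copy removed from pci.c above), a device whose pin register is 0, i.e. no INTx at all, yields -1 and must be skipped. A standalone illustration of that encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: "intx" is the Interrupt Pin config byte minus one. */
    static int intx_from_pin(uint8_t interrupt_pin)
    {
        return interrupt_pin - 1;
    }

    int main(void)
    {
        printf("pin=1 -> intx=%d\n", intx_from_pin(1)); /* INTA# -> 0 */
        printf("pin=0 -> intx=%d\n", intx_from_pin(0)); /* none  -> -1, skip IRQ */
        return 0;
    }
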
diff --git a/hw/pci/trace-events b/hw/pci/trace-events
index fc777d0b5e..7570752c40 100644
--- a/hw/pci/trace-events
+++ b/hw/pci/trace-events
@@ -1,12 +1,12 @@
# See docs/devel/tracing.rst for syntax documentation.
# pci.c
-pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
-pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
+pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
+pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
# pci_host.c
-pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x"
-pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x"
+pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x"
+pci_cfg_write(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x <- 0x%x"
# msix.c
msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d masked %d"
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 3b5fd749be..8373429325 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -4666,14 +4666,25 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
type_init(spapr_machine_register_##suffix)
/*
+ * pseries-7.0
+ */
+static void spapr_machine_7_0_class_options(MachineClass *mc)
+{
+ /* Defaults for the latest behaviour inherited from the base class */
+}
+
+DEFINE_SPAPR_MACHINE(7_0, "7.0", true);
+
+/*
* pseries-6.2
*/
static void spapr_machine_6_2_class_options(MachineClass *mc)
{
- /* Defaults for the latest behaviour inherited from the base class */
+ spapr_machine_7_0_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_SPAPR_MACHINE(6_2, "6.2", true);
+DEFINE_SPAPR_MACHINE(6_2, "6.2", false);
/*
* pseries-6.1
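
The pseries hunk above, the m68k virt hunk earlier, and the s390x ccw hunk below all follow the same versioned machine-type pattern: the new 7.0 options function is empty, the 6.2 one first calls it and then layers the 6.2 compatibility properties on top, and the "latest" flag in the DEFINE macro moves from 6.2 to 7.0. A minimal sketch of that chaining, with hypothetical names:

    #include <stdio.h>

    struct MachineOpts { int compat_level; };

    static void opts_7_0(struct MachineOpts *o)
    {
        /* latest version: defaults only, nothing to override */
    }

    static void opts_6_2(struct MachineOpts *o)
    {
        opts_7_0(o);            /* inherit everything from the newer version */
        o->compat_level = 62;   /* then pin the 6.2-era behaviour */
    }

    int main(void)
    {
        struct MachineOpts o = { 0 };

        opts_6_2(&o);
        printf("compat_level=%d\n", o.compat_level);
        return 0;
    }
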
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
index d1d065efbc..cafd1fc9ae 100644
--- a/hw/riscv/microchip_pfsoc.c
+++ b/hw/riscv/microchip_pfsoc.c
@@ -471,7 +471,7 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc,
TYPE_MICROCHIP_PFSOC);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
/* Split RAM into low and high regions using aliases to machine->ram */
mem_low_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size;
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index c531450b9f..0856c347e8 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -80,7 +80,7 @@ static void opentitan_board_init(MachineState *machine)
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc,
TYPE_RISCV_IBEX_SOC);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
memory_region_add_subregion(sys_mem,
memmap[IBEX_DEV_RAM].base, machine->ram);
diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index 9b206407a6..dcb87b6cfd 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -88,7 +88,7 @@ static void sifive_e_machine_init(MachineState *machine)
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_E_SOC);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
/* Data Tightly Integrated Memory */
memory_region_add_subregion(sys_mem,
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index aa74e67889..7fbc7dea42 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -547,7 +547,7 @@ static void sifive_u_machine_init(MachineState *machine)
&error_abort);
object_property_set_str(OBJECT(&s->soc), "cpu-type", machine->cpu_type,
&error_abort);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
/* register RAM */
memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_DRAM].base,
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 653587ea62..84e3e63c43 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -791,14 +791,26 @@ bool css_migration_enabled(void)
} \
type_init(ccw_machine_register_##suffix)
+static void ccw_machine_7_0_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_7_0_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(7_0, "7.0", true);
+
static void ccw_machine_6_2_instance_options(MachineState *machine)
{
+ ccw_machine_7_0_instance_options(machine);
}
static void ccw_machine_6_2_class_options(MachineClass *mc)
{
+ ccw_machine_7_0_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_CCW_MACHINE(6_2, "6.2", true);
+DEFINE_CCW_MACHINE(6_2, "6.2", false);
static void ccw_machine_6_1_instance_options(MachineState *machine)
{
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 039caf2614..778f43e4c1 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -170,6 +170,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
Error *err = NULL;
int vhostfd = -1;
int ret;
+ struct vhost_virtqueue *vqs = NULL;
if (!vs->conf.wwpn) {
error_setg(errp, "vhost-scsi: missing wwpn");
@@ -213,13 +214,19 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
}
vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
- vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
+ vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
+ vsc->dev.vqs = vqs;
vsc->dev.vq_index = 0;
vsc->dev.backend_features = 0;
ret = vhost_dev_init(&vsc->dev, (void *)(uintptr_t)vhostfd,
VHOST_BACKEND_TYPE_KERNEL, 0, errp);
if (ret < 0) {
+ /*
+ * vhost_dev_init calls vhost_dev_cleanup on error, which closes
+ * vhostfd, so don't close it again.
+ */
+ vhostfd = -1;
goto free_vqs;
}
@@ -232,7 +239,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
return;
free_vqs:
- g_free(vsc->dev.vqs);
+ g_free(vqs);
if (!vsc->migratable) {
migrate_del_blocker(vsc->migration_blocker);
}
@@ -240,7 +247,9 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
error_free(vsc->migration_blocker);
virtio_scsi_common_unrealize(dev);
close_fd:
- close(vhostfd);
+ if (vhostfd >= 0) {
+ close(vhostfd);
+ }
return;
}
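
The vhost-scsi fix above hinges on fd ownership: once vhost_dev_init() has been called, it performs its own cleanup on failure, including closing vhostfd, so the caller must forget its copy to avoid a double close. A standalone sketch of that pattern, with made-up names:

    #include <fcntl.h>
    #include <unistd.h>

    /* Illustration only: a callee that cleans up on failure also closes
     * the fd it was handed, so the caller drops its reference first. */
    static int callee_takes_fd(int fd)
    {
        close(fd);      /* pretend init failed: callee's cleanup closes fd */
        return -1;
    }

    int main(void)
    {
        int fd = open("/dev/null", O_RDWR);

        if (callee_takes_fd(fd) < 0) {
            fd = -1;                /* ownership passed; forget our copy */
        }
        if (fd >= 0) {
            close(fd);              /* only close what we still own */
        }
        return 0;
    }
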
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index bb5dbff68c..cd67a7bac8 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -116,8 +116,8 @@ struct SDState {
int32_t state; /* current card state, one of SDCardStates */
uint32_t vhs;
bool wp_switch;
- unsigned long *wp_groups;
- int32_t wpgrps_size;
+ unsigned long *wp_group_bmap;
+ int32_t wp_group_bits;
uint64_t size;
uint32_t blk_len;
uint32_t multi_blk_cnt;
@@ -290,12 +290,6 @@ FIELD(OCR, CARD_POWER_UP, 31, 1)
| R_OCR_CARD_CAPACITY_MASK \
| R_OCR_CARD_POWER_UP_MASK)
-static void sd_set_ocr(SDState *sd)
-{
- /* All voltages OK */
- sd->ocr = R_OCR_VDD_VOLTAGE_WIN_HI_MASK;
-}
-
static void sd_ocr_powerup(void *opaque)
{
SDState *sd = opaque;
@@ -311,6 +305,22 @@ static void sd_ocr_powerup(void *opaque)
}
}
+static void sd_set_ocr(SDState *sd)
+{
+ /* All voltages OK */
+ sd->ocr = R_OCR_VDD_VOLTAGE_WIN_HI_MASK;
+
+ if (sd->spi) {
+ /*
+ * We don't need to emulate the power up sequence in SPI-mode.
+ * Thus, the card's power up status bit should be set to 1 when reset.
+ * The card's capacity status bit should also be set if SD card size
+ * is larger than 2GB for SDHC support.
+ */
+ sd_ocr_powerup(sd);
+ }
+}
+
static void sd_set_scr(SDState *sd)
{
sd->scr[0] = 0 << 4; /* SCR structure version 1.0 */
@@ -560,6 +570,7 @@ static void sd_reset(DeviceState *dev)
sd->state = sd_idle_state;
sd->rca = 0x0000;
+ sd->size = size;
sd_set_ocr(sd);
sd_set_scr(sd);
sd_set_cid(sd);
@@ -567,14 +578,13 @@ static void sd_reset(DeviceState *dev)
sd_set_cardstatus(sd);
sd_set_sdstatus(sd);
- g_free(sd->wp_groups);
+ g_free(sd->wp_group_bmap);
sd->wp_switch = sd->blk ? !blk_is_writable(sd->blk) : false;
- sd->wpgrps_size = sect;
- sd->wp_groups = bitmap_new(sd->wpgrps_size);
+ sd->wp_group_bits = sect;
+ sd->wp_group_bmap = bitmap_new(sd->wp_group_bits);
memset(sd->function_group, 0, sizeof(sd->function_group));
sd->erase_start = INVALID_ADDRESS;
sd->erase_end = INVALID_ADDRESS;
- sd->size = size;
sd->blk_len = 0x200;
sd->pwd_len = 0;
sd->expecting_acmd = false;
@@ -673,7 +683,7 @@ static const VMStateDescription sd_vmstate = {
VMSTATE_UINT32(card_status, SDState),
VMSTATE_PARTIAL_BUFFER(sd_status, SDState, 1),
VMSTATE_UINT32(vhs, SDState),
- VMSTATE_BITMAP(wp_groups, SDState, 0, wpgrps_size),
+ VMSTATE_BITMAP(wp_group_bmap, SDState, 0, wp_group_bits),
VMSTATE_UINT32(blk_len, SDState),
VMSTATE_UINT32(multi_blk_cnt, SDState),
VMSTATE_UINT32(erase_start, SDState),
@@ -803,8 +813,8 @@ static void sd_erase(SDState *sd)
if (sdsc) {
/* Only SDSC cards support write protect groups */
wpnum = sd_addr_to_wpnum(erase_addr);
- assert(wpnum < sd->wpgrps_size);
- if (test_bit(wpnum, sd->wp_groups)) {
+ assert(wpnum < sd->wp_group_bits);
+ if (test_bit(wpnum, sd->wp_group_bmap)) {
sd->card_status |= WP_ERASE_SKIP;
continue;
}
@@ -828,8 +838,8 @@ static uint32_t sd_wpbits(SDState *sd, uint64_t addr)
*/
continue;
}
- assert(wpnum < sd->wpgrps_size);
- if (test_bit(wpnum, sd->wp_groups)) {
+ assert(wpnum < sd->wp_group_bits);
+ if (test_bit(wpnum, sd->wp_group_bmap)) {
ret |= (1 << i);
}
}
@@ -869,7 +879,7 @@ static void sd_function_switch(SDState *sd, uint32_t arg)
static inline bool sd_wp_addr(SDState *sd, uint64_t addr)
{
- return test_bit(sd_addr_to_wpnum(addr), sd->wp_groups);
+ return test_bit(sd_addr_to_wpnum(addr), sd->wp_group_bmap);
}
static void sd_lock_command(SDState *sd)
@@ -897,7 +907,7 @@ static void sd_lock_command(SDState *sd)
sd->card_status |= LOCK_UNLOCK_FAILED;
return;
}
- bitmap_zero(sd->wp_groups, sd->wpgrps_size);
+ bitmap_zero(sd->wp_group_bmap, sd->wp_group_bits);
sd->csd[14] &= ~0x10;
sd->card_status &= ~CARD_IS_LOCKED;
sd->pwd_len = 0;
@@ -1348,7 +1358,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
}
sd->state = sd_programming_state;
- set_bit(sd_addr_to_wpnum(addr), sd->wp_groups);
+ set_bit(sd_addr_to_wpnum(addr), sd->wp_group_bmap);
/* Bzzzzzzztt .... Operation complete. */
sd->state = sd_transfer_state;
return sd_r1b;
@@ -1370,7 +1380,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
}
sd->state = sd_programming_state;
- clear_bit(sd_addr_to_wpnum(addr), sd->wp_groups);
+ clear_bit(sd_addr_to_wpnum(addr), sd->wp_group_bmap);
/* Bzzzzzzztt .... Operation complete. */
sd->state = sd_transfer_state;
return sd_r1b;
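
The sd.c rename makes the roles explicit: wp_group_bmap is a bitmap with one bit per write-protect group and wp_group_bits is its length in bits. QEMU itself allocates it with bitmap_new() and queries it with set_bit()/clear_bit()/test_bit(); the following is only a minimal standalone stand-in showing how such a per-group bitmap behaves:

    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
    #define WP_GROUPS 64

    static unsigned long wp_bmap[(WP_GROUPS + BITS_PER_LONG - 1) / BITS_PER_LONG];

    static void wp_set(int n)
    {
        wp_bmap[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
    }

    static bool wp_test(int n)
    {
        return wp_bmap[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG));
    }

    int main(void)
    {
        wp_set(3);      /* e.g. SET_WRITE_PROT on group 3 */
        printf("group 3: %d, group 4: %d\n", wp_test(3), wp_test(4));
        return 0;
    }
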
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index 7397e56737..6013df1698 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -62,7 +62,7 @@ uint8_t *smbios_tables;
size_t smbios_tables_len;
unsigned smbios_table_max;
unsigned smbios_table_cnt;
-static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_21;
+static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_32;
static SmbiosEntryPoint ep;
@@ -432,7 +432,7 @@ static void smbios_validate_table(MachineState *ms)
exit(1);
}
- if (smbios_ep_type == SMBIOS_ENTRY_POINT_21 &&
+ if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_32 &&
smbios_tables_len > SMBIOS_21_MAX_TABLES_LEN) {
error_report("SMBIOS 2.1 table length %zu exceeds %d",
smbios_tables_len, SMBIOS_21_MAX_TABLES_LEN);
@@ -927,7 +927,7 @@ void smbios_set_defaults(const char *manufacturer, const char *product,
static void smbios_entry_point_setup(void)
{
switch (smbios_ep_type) {
- case SMBIOS_ENTRY_POINT_21:
+ case SMBIOS_ENTRY_POINT_TYPE_32:
memcpy(ep.ep21.anchor_string, "_SM_", 4);
memcpy(ep.ep21.intermediate_anchor_string, "_DMI_", 5);
ep.ep21.length = sizeof(struct smbios_21_entry_point);
@@ -950,7 +950,7 @@ static void smbios_entry_point_setup(void)
ep.ep21.structure_table_address = cpu_to_le32(0);
break;
- case SMBIOS_ENTRY_POINT_30:
+ case SMBIOS_ENTRY_POINT_TYPE_64:
memcpy(ep.ep30.anchor_string, "_SM3_", 5);
ep.ep30.length = sizeof(struct smbios_30_entry_point);
ep.ep30.entry_point_revision = 1;
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index b65f8f7e97..e409a865ae 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -47,7 +47,7 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
- return close(fd);
+ return close(fd) < 0 ? -errno : 0;
}
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
@@ -58,7 +58,7 @@ static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
&s, NULL, NULL)) {
uint64_t val = g_ascii_strtoull(s, NULL, 10);
- if (!((val == G_MAXUINT64 || !val) && errno)) {
+ if (val < INT_MAX && val > 0) {
g_free(s);
return val;
}
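
Both this file and the vhost-user.c changes that follow converge on one error convention: return a negative errno (or a specific code such as -EPROTO or -EIO) instead of a bare -1, so callers can propagate and report the real cause. A standalone sketch of the close() case shown above:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int close_and_report(int fd)
    {
        return close(fd) < 0 ? -errno : 0;   /* mirrors the hunk above */
    }

    int main(void)
    {
        int ret = close_and_report(-1);      /* invalid fd -> -EBADF */

        printf("ret=%d (%s)\n", ret, strerror(-ret));
        return 0;
    }
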
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index bf6e50223c..662853513e 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -280,9 +280,10 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
r = qemu_chr_fe_read_all(chr, p, size);
if (r != size) {
+ int saved_errno = errno;
error_report("Failed to read msg header. Read %d instead of %d."
" Original request %d.", r, size, msg->hdr.request);
- return -1;
+ return r < 0 ? -saved_errno : -EIO;
}
/* validate received flags */
@@ -290,7 +291,7 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
error_report("Failed to read msg header."
" Flags 0x%x instead of 0x%x.", msg->hdr.flags,
VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
- return -1;
+ return -EPROTO;
}
return 0;
@@ -314,8 +315,9 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
uint8_t *p = (uint8_t *) msg;
int r, size;
- if (vhost_user_read_header(dev, msg) < 0) {
- data->ret = -1;
+ r = vhost_user_read_header(dev, msg);
+ if (r < 0) {
+ data->ret = r;
goto end;
}
@@ -324,7 +326,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
error_report("Failed to read msg header."
" Size %d exceeds the maximum %zu.", msg->hdr.size,
VHOST_USER_PAYLOAD_SIZE);
- data->ret = -1;
+ data->ret = -EPROTO;
goto end;
}
@@ -333,9 +335,10 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
size = msg->hdr.size;
r = qemu_chr_fe_read_all(chr, p, size);
if (r != size) {
+ int saved_errno = errno;
error_report("Failed to read msg payload."
" Read %d instead of %d.", r, msg->hdr.size);
- data->ret = -1;
+ data->ret = r < 0 ? -saved_errno : -EIO;
goto end;
}
}
@@ -418,24 +421,26 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
static int process_message_reply(struct vhost_dev *dev,
const VhostUserMsg *msg)
{
+ int ret;
VhostUserMsg msg_reply;
if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
return 0;
}
- if (vhost_user_read(dev, &msg_reply) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg_reply);
+ if (ret < 0) {
+ return ret;
}
if (msg_reply.hdr.request != msg->hdr.request) {
error_report("Received unexpected msg type. "
"Expected %d received %d",
msg->hdr.request, msg_reply.hdr.request);
- return -1;
+ return -EPROTO;
}
- return msg_reply.payload.u64 ? -1 : 0;
+ return msg_reply.payload.u64 ? -EIO : 0;
}
static bool vhost_user_one_time_request(VhostUserRequest request)
@@ -472,14 +477,15 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
error_report("Failed to set msg fds.");
- return -1;
+ return -EINVAL;
}
ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
if (ret != size) {
+ int saved_errno = errno;
error_report("Failed to write msg."
" Wrote %d instead of %d.", ret, size);
- return -1;
+ return ret < 0 ? -saved_errno : -EIO;
}
return 0;
@@ -502,6 +508,7 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
size_t fd_num = 0;
bool shmfd = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_LOG_SHMFD);
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_LOG_BASE,
.hdr.flags = VHOST_USER_VERSION,
@@ -514,21 +521,23 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
fds[fd_num++] = log->fd;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, fds, fd_num);
+ if (ret < 0) {
+ return ret;
}
if (shmfd) {
msg.hdr.size = 0;
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
error_report("Received unexpected msg type. "
"Expected %d received %d",
VHOST_USER_SET_LOG_BASE, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
}
@@ -588,7 +597,7 @@ static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
u->region_rb[i] = mr->ram_block;
} else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
error_report("Failed preparing vhost-user memory table msg");
- return -1;
+ return -ENOBUFS;
}
vhost_user_fill_msg_region(&region_buffer, reg, offset);
msg->payload.memory.regions[*fd_num] = region_buffer;
@@ -604,14 +613,14 @@ static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
if (!*fd_num) {
error_report("Failed initializing vhost-user memory map, "
"consider using -object memory-backend-file share=on");
- return -1;
+ return -EINVAL;
}
msg->hdr.size = sizeof(msg->payload.memory.nregions);
msg->hdr.size += sizeof(msg->payload.memory.padding);
msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);
- return 1;
+ return 0;
}
static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
@@ -741,8 +750,9 @@ static int send_remove_regions(struct vhost_dev *dev,
vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
msg->payload.mem_reg.region = region_buffer;
- if (vhost_user_write(dev, msg, &fd, 1) < 0) {
- return -1;
+ ret = vhost_user_write(dev, msg, &fd, 1);
+ if (ret < 0) {
+ return ret;
}
if (reply_supported) {
@@ -801,15 +811,17 @@ static int send_add_regions(struct vhost_dev *dev,
vhost_user_fill_msg_region(&region_buffer, reg, offset);
msg->payload.mem_reg.region = region_buffer;
- if (vhost_user_write(dev, msg, &fd, 1) < 0) {
- return -1;
+ ret = vhost_user_write(dev, msg, &fd, 1);
+ if (ret < 0) {
+ return ret;
}
if (track_ramblocks) {
uint64_t reply_gpa;
- if (vhost_user_read(dev, &msg_reply) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg_reply);
+ if (ret < 0) {
+ return ret;
}
reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;
@@ -819,7 +831,7 @@ static int send_add_regions(struct vhost_dev *dev,
"Expected %d received %d", __func__,
VHOST_USER_ADD_MEM_REG,
msg_reply.hdr.request);
- return -1;
+ return -EPROTO;
}
/*
@@ -830,7 +842,7 @@ static int send_add_regions(struct vhost_dev *dev,
error_report("%s: Unexpected size for postcopy reply "
"%d vs %d", __func__, msg_reply.hdr.size,
msg->hdr.size);
- return -1;
+ return -EPROTO;
}
/* Get the postcopy client base from the backend's reply. */
@@ -846,7 +858,7 @@ static int send_add_regions(struct vhost_dev *dev,
"Got guest physical address %" PRIX64 ", expected "
"%" PRIX64, __func__, reply_gpa,
dev->mem->regions[reg_idx].guest_phys_addr);
- return -1;
+ return -EPROTO;
}
} else if (reply_supported) {
ret = process_message_reply(dev, msg);
@@ -887,6 +899,7 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev,
struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
int nr_add_reg, nr_rem_reg;
+ int ret;
msg->hdr.size = sizeof(msg->payload.mem_reg);
@@ -894,16 +907,20 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev,
scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
shadow_pcb, track_ramblocks);
- if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
- reply_supported) < 0)
- {
- goto err;
+ if (nr_rem_reg) {
+ ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
+ reply_supported);
+ if (ret < 0) {
+ goto err;
+ }
}
- if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
- shadow_pcb, reply_supported, track_ramblocks) < 0)
- {
- goto err;
+ if (nr_add_reg) {
+ ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
+ reply_supported, track_ramblocks);
+ if (ret < 0) {
+ goto err;
+ }
}
if (track_ramblocks) {
@@ -918,8 +935,9 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev,
msg->hdr.size = sizeof(msg->payload.u64);
msg->payload.u64 = 0; /* OK */
- if (vhost_user_write(dev, msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
}
@@ -931,7 +949,7 @@ err:
sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
}
- return -1;
+ return ret;
}
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
@@ -944,6 +962,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
size_t fd_num = 0;
VhostUserMsg msg_reply;
int region_i, msg_i;
+ int ret;
VhostUserMsg msg = {
.hdr.flags = VHOST_USER_VERSION,
@@ -961,29 +980,32 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
}
if (config_mem_slots) {
- if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
- true) < 0) {
- return -1;
+ ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
+ if (ret < 0) {
+ return ret;
}
} else {
- if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
- true) < 0) {
- return -1;
+ ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
+ true);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, fds, fd_num);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg_reply) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg_reply);
+ if (ret < 0) {
+ return ret;
}
if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
error_report("%s: Received unexpected msg type."
"Expected %d received %d", __func__,
VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
- return -1;
+ return -EPROTO;
}
/*
@@ -994,7 +1016,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
error_report("%s: Unexpected size for postcopy reply "
"%d vs %d", __func__, msg_reply.hdr.size,
msg.hdr.size);
- return -1;
+ return -EPROTO;
}
memset(u->postcopy_client_bases, 0,
@@ -1024,7 +1046,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
error_report("%s: postcopy reply not fully consumed "
"%d vs %zd",
__func__, msg_i, fd_num);
- return -1;
+ return -EIO;
}
/*
@@ -1035,8 +1057,9 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
/* TODO: Use this for failure cases as well with a bad value. */
msg.hdr.size = sizeof(msg.payload.u64);
msg.payload.u64 = 0; /* OK */
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
}
@@ -1055,6 +1078,7 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
bool config_mem_slots =
virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
+ int ret;
if (do_postcopy) {
/*
@@ -1074,17 +1098,20 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
}
if (config_mem_slots) {
- if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
- false) < 0) {
- return -1;
+ ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
+ if (ret < 0) {
+ return ret;
}
} else {
- if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
- false) < 0) {
- return -1;
+ ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
+ false);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
+
+ ret = vhost_user_write(dev, &msg, fds, fd_num);
+ if (ret < 0) {
+ return ret;
}
if (reply_supported) {
@@ -1109,14 +1136,10 @@ static int vhost_user_set_vring_endian(struct vhost_dev *dev,
if (!cross_endian) {
error_report("vhost-user trying to send unhandled ioctl");
- return -1;
+ return -ENOTSUP;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_set_vring(struct vhost_dev *dev,
@@ -1130,11 +1153,7 @@ static int vhost_set_vring(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.state),
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_set_vring_num(struct vhost_dev *dev,
@@ -1182,16 +1201,25 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
int i;
if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
- return -1;
+ return -EINVAL;
}
for (i = 0; i < dev->nvqs; ++i) {
+ int ret;
struct vhost_vring_state state = {
.index = dev->vq_index + i,
.num = enable,
};
- vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ if (ret < 0) {
+ /*
+ * Restoring the previous state is likely infeasible, as well as
+ * proceeding regardless of the error, so just bail out and hope for
+ * device-level recovery.
+ */
+ return ret;
+ }
}
return 0;
@@ -1200,6 +1228,7 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
static int vhost_user_get_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_GET_VRING_BASE,
.hdr.flags = VHOST_USER_VERSION,
@@ -1209,23 +1238,25 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
vhost_user_host_notifier_remove(dev, ring->index);
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
error_report("Received unexpected msg type. Expected %d received %d",
VHOST_USER_GET_VRING_BASE, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.state)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
*ring = msg.payload.state;
@@ -1252,11 +1283,7 @@ static int vhost_set_vring_file(struct vhost_dev *dev,
msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, fds, fd_num);
}
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
@@ -1274,6 +1301,7 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev,
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = request,
.hdr.flags = VHOST_USER_VERSION,
@@ -1283,23 +1311,25 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
return 0;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != request) {
error_report("Received unexpected msg type. Expected %d received %d",
request, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.u64)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
*u64 = msg.payload.u64;
@@ -1337,6 +1367,7 @@ static int enforce_reply(struct vhost_dev *dev,
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
struct vhost_vring_addr *addr)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_VRING_ADDR,
.hdr.flags = VHOST_USER_VERSION,
@@ -1357,8 +1388,9 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
if (wait_for_reply) {
@@ -1377,6 +1409,7 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
.payload.u64 = u64,
.hdr.size = sizeof(msg.payload.u64),
};
+ int ret;
if (wait_for_reply) {
bool reply_supported = virtio_has_feature(dev->protocol_features,
@@ -1386,8 +1419,9 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
}
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
if (wait_for_reply) {
@@ -1424,11 +1458,7 @@ static int vhost_user_set_owner(struct vhost_dev *dev)
.hdr.flags = VHOST_USER_VERSION,
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -EPROTO;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_get_max_memslots(struct vhost_dev *dev,
@@ -1459,26 +1489,16 @@ static int vhost_user_reset_device(struct vhost_dev *dev)
? VHOST_USER_RESET_DEVICE
: VHOST_USER_RESET_OWNER;
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
- int ret = -1;
-
- if (!dev->config_ops) {
- return -1;
+ if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
+ return -ENOSYS;
}
- if (dev->config_ops->vhost_dev_config_notifier) {
- ret = dev->config_ops->vhost_dev_config_notifier(dev);
- }
-
- return ret;
+ return dev->config_ops->vhost_dev_config_notifier(dev);
}
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
@@ -1497,7 +1517,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
- return -1;
+ return -EINVAL;
}
n = &user->notifier[queue_idx];
@@ -1515,13 +1535,13 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
/* Sanity check. */
if (area->size != page_size) {
- return -1;
+ return -EINVAL;
}
addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, area->offset);
if (addr == MAP_FAILED) {
- return -1;
+ return -EFAULT;
}
name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
@@ -1534,7 +1554,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
object_unparent(OBJECT(&n->mr));
munmap(addr, page_size);
- return -1;
+ return -ENXIO;
}
n->addr = addr;
@@ -1664,14 +1684,15 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev)
}
if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+ int saved_errno = errno;
error_report("socketpair() failed");
- return -1;
+ return -saved_errno;
}
ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
if (!ioc) {
error_report_err(local_err);
- return -1;
+ return -ECONNREFUSED;
}
u->slave_ioc = ioc;
slave_update_read_handler(dev, NULL);
@@ -1778,35 +1799,38 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
struct vhost_user *u = dev->opaque;
CharBackend *chr = u->user->chr;
int ufd;
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_POSTCOPY_ADVISE,
.hdr.flags = VHOST_USER_VERSION,
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
error_setg(errp, "Failed to send postcopy_advise to vhost");
- return -1;
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
error_setg(errp, "Failed to get postcopy_advise reply from vhost");
- return -1;
+ return ret;
}
if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
error_setg(errp, "Unexpected msg type. Expected %d received %d",
VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size) {
error_setg(errp, "Received bad msg size.");
- return -1;
+ return -EPROTO;
}
ufd = qemu_chr_fe_get_msgfd(chr);
if (ufd < 0) {
error_setg(errp, "%s: Failed to get ufd", __func__);
- return -1;
+ return -EIO;
}
qemu_set_nonblock(ufd);
@@ -1820,7 +1844,7 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
return 0;
#else
error_setg(errp, "Postcopy not supported on non-Linux systems");
- return -1;
+ return -ENOSYS;
#endif
}
@@ -1836,10 +1860,13 @@ static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
.hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
};
u->postcopy_listen = true;
+
trace_vhost_user_postcopy_listen();
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
error_setg(errp, "Failed to send postcopy_listen to vhost");
- return -1;
+ return ret;
}
ret = process_message_reply(dev, &msg);
@@ -1864,9 +1891,11 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
struct vhost_user *u = dev->opaque;
trace_vhost_user_postcopy_end_entry();
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
error_setg(errp, "Failed to send postcopy_end to vhost");
- return -1;
+ return ret;
}
ret = process_message_reply(dev, &msg);
@@ -2115,7 +2144,7 @@ static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
return vhost_user_write(dev, &msg, NULL, 0);
}
- return -1;
+ return -ENOTSUP;
}
static bool vhost_user_can_merge(struct vhost_dev *dev,
@@ -2136,6 +2165,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
VhostUserMsg msg;
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
+ int ret;
if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
return 0;
@@ -2149,8 +2179,9 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
/* If reply_ack supported, slave has to ack specified MTU is valid */
@@ -2164,6 +2195,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *imsg)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_IOTLB_MSG,
.hdr.size = sizeof(msg.payload.iotlb),
@@ -2171,8 +2203,9 @@ static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
.payload.iotlb = *imsg,
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -EFAULT;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
return process_message_reply(dev, &msg);
@@ -2187,6 +2220,7 @@ static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
uint32_t config_len, Error **errp)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_GET_CONFIG,
.hdr.flags = VHOST_USER_VERSION,
@@ -2203,26 +2237,28 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
msg.payload.config.offset = 0;
msg.payload.config.size = config_len;
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- error_setg_errno(errp, EPROTO, "vhost_get_config failed");
- return -EPROTO;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost_get_config failed");
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- error_setg_errno(errp, EPROTO, "vhost_get_config failed");
- return -EPROTO;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost_get_config failed");
+ return ret;
}
if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
error_setg(errp,
"Received unexpected msg type. Expected %d received %d",
VHOST_USER_GET_CONFIG, msg.hdr.request);
- return -EINVAL;
+ return -EPROTO;
}
if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
error_setg(errp, "Received bad msg size.");
- return -EINVAL;
+ return -EPROTO;
}
memcpy(config, msg.payload.config.region, config_len);
@@ -2233,6 +2269,7 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
uint32_t offset, uint32_t size, uint32_t flags)
{
+ int ret;
uint8_t *p;
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
@@ -2245,7 +2282,7 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CONFIG)) {
- return -1;
+ return -ENOTSUP;
}
if (reply_supported) {
@@ -2253,7 +2290,7 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
}
if (size > VHOST_USER_MAX_CONFIG_SIZE) {
- return -1;
+ return -EINVAL;
}
msg.payload.config.offset = offset,
@@ -2262,8 +2299,9 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
p = msg.payload.config.region;
memcpy(p, data, size);
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
if (reply_supported) {
@@ -2277,6 +2315,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev,
void *session_info,
uint64_t *session_id)
{
+ int ret;
bool crypto_session = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
CryptoDevBackendSymSessionInfo *sess_info = session_info;
@@ -2290,7 +2329,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev,
if (!crypto_session) {
error_report("vhost-user trying to send unhandled ioctl");
- return -1;
+ return -ENOTSUP;
}
memcpy(&msg.payload.session.session_setup_data, sess_info,
@@ -2303,31 +2342,35 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev,
memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
sess_info->auth_key_len);
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- error_report("vhost_user_write() return -1, create session failed");
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ error_report("vhost_user_write() return %d, create session failed",
+ ret);
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- error_report("vhost_user_read() return -1, create session failed");
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ error_report("vhost_user_read() return %d, create session failed",
+ ret);
+ return ret;
}
if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
error_report("Received unexpected msg type. Expected %d received %d",
VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.session)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
if (msg.payload.session.session_id < 0) {
error_report("Bad session id: %" PRId64 "",
msg.payload.session.session_id);
- return -1;
+ return -EINVAL;
}
*session_id = msg.payload.session.session_id;
@@ -2337,6 +2380,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev,
static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
+ int ret;
bool crypto_session = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
VhostUserMsg msg = {
@@ -2348,12 +2392,14 @@ vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
if (!crypto_session) {
error_report("vhost-user trying to send unhandled ioctl");
- return -1;
+ return -ENOTSUP;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- error_report("vhost_user_write() return -1, close session failed");
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ error_report("vhost_user_write() return %d, close session failed",
+ ret);
+ return ret;
}
return 0;
@@ -2375,6 +2421,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
{
void *addr;
int fd;
+ int ret;
struct vhost_user *u = dev->opaque;
CharBackend *chr = u->user->chr;
VhostUserMsg msg = {
@@ -2390,24 +2437,26 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
return 0;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
error_report("Received unexpected msg type. "
"Expected %d received %d",
VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.inflight)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
if (!msg.payload.inflight.mmap_size) {
@@ -2417,7 +2466,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
fd = qemu_chr_fe_get_msgfd(chr);
if (fd < 0) {
error_report("Failed to get mem fd");
- return -1;
+ return -EIO;
}
addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
@@ -2426,7 +2475,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
if (addr == MAP_FAILED) {
error_report("Failed to mmap mem fd");
close(fd);
- return -1;
+ return -EFAULT;
}
inflight->addr = addr;
@@ -2456,11 +2505,7 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
return 0;
}
- if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, &inflight->fd, 1);
}
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
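The vhost-user.c hunks above all follow one pattern: helpers stop collapsing every failure to -1 and instead hand back the negative errno they received from vhost_user_write()/vhost_user_read(), while protocol mismatches become -EPROTO. A minimal stand-alone sketch of that pattern, with hypothetical names, looks like this:

    #include <errno.h>

    static int send_request(int fd)      /* stand-in for a write helper */
    {
        return fd < 0 ? -EBADF : 0;
    }

    static int do_operation(int fd)
    {
        int ret = send_request(fd);
        if (ret < 0) {
            return ret;      /* keep the specific errno for the caller */
        }
        return 0;
    }

    int main(void)
    {
        return do_operation(-1) < 0;     /* propagated as -EBADF */
    }

The same shape repeats for the reply handling and the header sanity checks, which now report -EPROTO rather than a bare -1.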
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index bcaf00e09f..04ea43704f 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -292,18 +292,34 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
return ret < 0 ? -errno : ret;
}
-static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
+static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
uint8_t s;
+ int ret;
trace_vhost_vdpa_add_status(dev, status);
- if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
- return;
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
+ if (ret < 0) {
+ return ret;
}
s |= status;
- vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!(s & status)) {
+ return -EIO;
+ }
+
+ return 0;
}
static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
@@ -484,7 +500,7 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
}
}
if (mem->padding) {
- return -1;
+ return -EINVAL;
}
return 0;
@@ -501,14 +517,11 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
trace_vhost_vdpa_set_features(dev, features);
ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
- uint8_t status = 0;
if (ret) {
return ret;
}
- vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
- vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
- return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
+ return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
@@ -650,12 +663,8 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
}
if (started) {
- uint8_t status = 0;
memory_listener_register(&v->listener, &address_space_memory);
- vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
-
- return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
} else {
vhost_vdpa_reset_device(dev);
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
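The reworked vhost_vdpa_add_status() above also verifies its work: after setting a status bit it reads the status back and returns -EIO if the bit did not stick, so vhost_vdpa_set_features() and vhost_vdpa_dev_start() can simply propagate that result. A self-contained sketch of the read-modify-write-verify idea, where get_status()/set_status() are hypothetical stand-ins for the VHOST_VDPA_GET_STATUS/SET_STATUS calls:

    #include <errno.h>
    #include <stdint.h>

    static uint8_t device_status;                 /* fake device register */

    static int get_status(uint8_t *s) { *s = device_status; return 0; }
    static int set_status(uint8_t s)  { device_status = s; return 0; }

    static int add_status(uint8_t bits)
    {
        uint8_t s;
        int ret = get_status(&s);
        if (ret < 0) {
            return ret;
        }
        ret = set_status(s | bits);
        if (ret < 0) {
            return ret;
        }
        ret = get_status(&s);                     /* read back and verify */
        if (ret < 0) {
            return ret;
        }
        return (s & bits) == bits ? 0 : -EIO;
    }

    int main(void)
    {
        return add_status(0x04) ? 1 : 0;          /* FEATURES_OK-style bit */
    }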
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 478c0c9a87..433d42d897 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -171,6 +171,10 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd,
VHOST_BACKEND_TYPE_KERNEL, 0, errp);
if (ret < 0) {
+ /*
+ * vhostfd is closed by vhost_dev_cleanup, which is called
+ * by vhost_dev_init on initialization error.
+ */
goto err_virtio;
}
@@ -183,15 +187,10 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
return;
err_vhost_dev:
- vhost_dev_cleanup(&vvc->vhost_dev);
/* vhost_dev_cleanup() closes the vhostfd passed to vhost_dev_init() */
- vhostfd = -1;
+ vhost_dev_cleanup(&vvc->vhost_dev);
err_virtio:
vhost_vsock_common_unrealize(vdev);
- if (vhostfd >= 0) {
- close(vhostfd);
- }
- return;
}
static void vhost_vsock_device_unrealize(DeviceState *dev)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 20913cf8fb..7b03efccec 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -33,11 +33,13 @@
#define _VHOST_DEBUG 1
#ifdef _VHOST_DEBUG
-#define VHOST_OPS_DEBUG(fmt, ...) \
- do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
- strerror(errno), errno); } while (0)
+#define VHOST_OPS_DEBUG(retval, fmt, ...) \
+ do { \
+ error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
+ strerror(-retval), -retval); \
+ } while (0)
#else
-#define VHOST_OPS_DEBUG(fmt, ...) \
+#define VHOST_OPS_DEBUG(retval, fmt, ...) \
do { } while (0)
#endif
@@ -297,7 +299,7 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
releasing the current log, to ensure no logging is lost */
r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_log_base failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
}
vhost_log_put(dev, true);
@@ -550,7 +552,7 @@ static void vhost_commit(MemoryListener *listener)
if (!dev->log_enabled) {
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
}
goto out;
}
@@ -564,7 +566,7 @@ static void vhost_commit(MemoryListener *listener)
}
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
}
/* To log less, can only decrease log size after table update. */
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
@@ -803,8 +805,8 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
if (dev->vhost_ops->vhost_vq_get_addr) {
r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_vq_get_addr failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
+ return r;
}
} else {
addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
@@ -816,10 +818,9 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
}
- return 0;
+ return r;
}
static int vhost_dev_set_features(struct vhost_dev *dev,
@@ -840,19 +841,19 @@ static int vhost_dev_set_features(struct vhost_dev *dev,
}
r = dev->vhost_ops->vhost_set_features(dev, features);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_features failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_features failed");
goto out;
}
if (dev->vhost_ops->vhost_set_backend_cap) {
r = dev->vhost_ops->vhost_set_backend_cap(dev);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_backend_cap failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
goto out;
}
}
out:
- return r < 0 ? -errno : 0;
+ return r;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
@@ -999,22 +1000,17 @@ static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
bool is_big_endian,
int vhost_vq_index)
{
+ int r;
struct vhost_vring_state s = {
.index = vhost_vq_index,
.num = is_big_endian
};
- if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
- return 0;
- }
-
- VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
- if (errno == ENOTTY) {
- error_report("vhost does not support cross-endian");
- return -ENOSYS;
+ r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
+ if (r < 0) {
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
}
-
- return -errno;
+ return r;
}
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
@@ -1106,15 +1102,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
vq->num = state.num = virtio_queue_get_num(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_num failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
+ return r;
}
state.num = virtio_queue_get_last_avail_idx(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_base failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
+ return r;
}
if (vhost_needs_vring_endian(vdev)) {
@@ -1122,7 +1118,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
virtio_is_big_endian(vdev),
vhost_vq_index);
if (r) {
- return -errno;
+ return r;
}
}
@@ -1150,15 +1146,13 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
if (r < 0) {
- r = -errno;
goto fail_alloc;
}
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
goto fail_kick;
}
@@ -1218,7 +1212,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
+ VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
/* Connection to the backend is broken, so let's sync internal
* last avail idx to the device used idx.
*/
@@ -1274,7 +1268,7 @@ static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
return r;
}
@@ -1296,8 +1290,7 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
file.fd = event_notifier_get_fd(&vq->masked_notifier);
r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_call failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
goto fail_call;
}
@@ -1557,7 +1550,7 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_vring_call failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
}
}
@@ -1599,7 +1592,7 @@ int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
}
error_setg(errp, "vhost_get_config not implemented");
- return -ENOTSUP;
+ return -ENOSYS;
}
int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
@@ -1612,7 +1605,7 @@ int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
size, flags);
}
- return -1;
+ return -ENOSYS;
}
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
@@ -1641,7 +1634,7 @@ static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
if (err) {
error_report_err(err);
- return -1;
+ return -ENOMEM;
}
vhost_dev_free_inflight(inflight);
@@ -1674,8 +1667,9 @@ int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
}
if (inflight->size != size) {
- if (vhost_dev_resize_inflight(inflight, size)) {
- return -1;
+ int ret = vhost_dev_resize_inflight(inflight, size);
+ if (ret < 0) {
+ return ret;
}
}
inflight->queue_size = qemu_get_be16(f);
@@ -1698,7 +1692,7 @@ int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed");
+ VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed");
return r;
}
@@ -1713,8 +1707,8 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed");
+ return r;
}
}
@@ -1729,8 +1723,8 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
if (dev->vhost_ops->vhost_get_inflight_fd) {
r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
if (r) {
- VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed");
+ return r;
}
}
@@ -1759,8 +1753,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
goto fail_mem;
}
for (i = 0; i < hdev->nvqs; ++i) {
@@ -1784,8 +1777,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
hdev->log_size ? log_base : 0,
hdev->log);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_log_base failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
goto fail_log;
}
}
@@ -1860,5 +1852,5 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
}
- return -1;
+ return -ENOSYS;
}
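vhost.c makes the matching switch at the reporting layer: VHOST_OPS_DEBUG() now receives the returned value and decodes it with strerror(-retval), and call sites stop re-deriving errors from the (possibly stale) thread-local errno. A compilable illustration of that macro shape, using demo names and the same GNU-style ", ##__VA_ARGS__" as the original:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    #define OPS_DEBUG(retval, fmt, ...) \
        fprintf(stderr, fmt ": %s (%d)\n", ##__VA_ARGS__, \
                strerror(-(retval)), -(retval))

    static int backend_op(void) { return -ENOTSUP; }   /* illustrative */

    int main(void)
    {
        int r = backend_op();
        if (r < 0) {
            OPS_DEBUG(r, "backend_op failed");  /* prints the real cause */
        }
        return 0;
    }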
diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
index d5a578142b..04c223b0c9 100644
--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c
@@ -33,6 +33,14 @@
#include "trace.h"
/*
+ * We only had legacy x86 guests that did not support
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE. Other targets don't have legacy guests.
+ */
+#if defined(TARGET_X86_64) || defined(TARGET_I386)
+#define VIRTIO_MEM_HAS_LEGACY_GUESTS
+#endif
+
+/*
* Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking
* bitmap small.
*/
@@ -110,6 +118,19 @@ static uint64_t virtio_mem_default_block_size(RAMBlock *rb)
return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE);
}
+#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
+static bool virtio_mem_has_shared_zeropage(RAMBlock *rb)
+{
+ /*
+ * We only have a guaranteed shared zeropage on ordinary MAP_PRIVATE
+ * anonymous RAM. In any other case, reading unplugged *can* populate a
+ * fresh page, consuming actual memory.
+ */
+ return !qemu_ram_is_shared(rb) && rb->fd < 0 &&
+ qemu_ram_pagesize(rb) == qemu_real_host_page_size;
+}
+#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
+
/*
* Size the usable region bigger than the requested size if possible. Esp.
* Linux guests will only add (aligned) memory blocks in case they fully
@@ -429,10 +450,40 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
return -EBUSY;
}
virtio_mem_notify_unplug(vmem, offset, size);
- } else if (virtio_mem_notify_plug(vmem, offset, size)) {
- /* Could be a mapping attempt resulted in memory getting populated. */
- ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
- return -EBUSY;
+ } else {
+ int ret = 0;
+
+ if (vmem->prealloc) {
+ void *area = memory_region_get_ram_ptr(&vmem->memdev->mr) + offset;
+ int fd = memory_region_get_fd(&vmem->memdev->mr);
+ Error *local_err = NULL;
+
+ os_mem_prealloc(fd, area, size, 1, &local_err);
+ if (local_err) {
+ static bool warned;
+
+ /*
+ * Warn only once, we don't want to fill the log with these
+ * warnings.
+ */
+ if (!warned) {
+ warn_report_err(local_err);
+ warned = true;
+ } else {
+ error_free(local_err);
+ }
+ ret = -EBUSY;
+ }
+ }
+ if (!ret) {
+ ret = virtio_mem_notify_plug(vmem, offset, size);
+ }
+
+ if (ret) {
+ /* Could be preallocation or a notifier populated memory. */
+ ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
+ return -EBUSY;
+ }
}
virtio_mem_set_bitmap(vmem, start_gpa, size, plug);
return 0;
@@ -653,15 +704,29 @@ static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features,
Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
+ VirtIOMEM *vmem = VIRTIO_MEM(vdev);
if (ms->numa_state) {
#if defined(CONFIG_ACPI)
virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM);
#endif
}
+ assert(vmem->unplugged_inaccessible != ON_OFF_AUTO_AUTO);
+ if (vmem->unplugged_inaccessible == ON_OFF_AUTO_ON) {
+ virtio_add_feature(&features, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE);
+ }
return features;
}
+static int virtio_mem_validate_features(VirtIODevice *vdev)
+{
+ if (virtio_host_has_feature(vdev, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE)) {
+ return -EFAULT;
+ }
+ return 0;
+}
+
static void virtio_mem_system_reset(void *opaque)
{
VirtIOMEM *vmem = VIRTIO_MEM(opaque);
@@ -716,6 +781,29 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
rb = vmem->memdev->mr.ram_block;
page_size = qemu_ram_pagesize(rb);
+#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
+ switch (vmem->unplugged_inaccessible) {
+ case ON_OFF_AUTO_AUTO:
+ if (virtio_mem_has_shared_zeropage(rb)) {
+ vmem->unplugged_inaccessible = ON_OFF_AUTO_OFF;
+ } else {
+ vmem->unplugged_inaccessible = ON_OFF_AUTO_ON;
+ }
+ break;
+ case ON_OFF_AUTO_OFF:
+ if (!virtio_mem_has_shared_zeropage(rb)) {
+ warn_report("'%s' property set to 'off' with a memdev that does"
+ " not support the shared zeropage.",
+ VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP);
+ }
+ break;
+ default:
+ break;
+ }
+#else /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
+ vmem->unplugged_inaccessible = ON_OFF_AUTO_ON;
+#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
+
/*
* If the block size wasn't configured by the user, use a sane default. This
* allows using hugetlbfs backends of any page size without manual
@@ -733,7 +821,8 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
warn_report("'%s' property is smaller than the default block size (%"
PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP,
virtio_mem_default_block_size(rb) / MiB);
- } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) {
+ }
+ if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) {
error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64
")", VIRTIO_MEM_REQUESTED_SIZE_PROP,
VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size);
@@ -1107,8 +1196,13 @@ static void virtio_mem_instance_init(Object *obj)
static Property virtio_mem_properties[] = {
DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0),
DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0),
+ DEFINE_PROP_BOOL(VIRTIO_MEM_PREALLOC_PROP, VirtIOMEM, prealloc, false),
DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
+#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
+ DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM,
+ unplugged_inaccessible, ON_OFF_AUTO_AUTO),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
@@ -1247,6 +1341,7 @@ static void virtio_mem_class_init(ObjectClass *klass, void *data)
vdc->unrealize = virtio_mem_device_unrealize;
vdc->get_config = virtio_mem_get_config;
vdc->get_features = virtio_mem_get_features;
+ vdc->validate_features = virtio_mem_validate_features;
vdc->vmsd = &vmstate_virtio_mem_device;
vmc->fill_device_info = virtio_mem_fill_device_info;
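virtio-mem also gains a validate_features hook above: if the device offered VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE (mandatory outside the legacy x86 case) and the guest did not acknowledge it, negotiation fails with -EFAULT. The check reduces to the following sketch; the bit value and helper are illustrative, not the real virtio API:

    #include <errno.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define F_UNPLUGGED_INACCESSIBLE (1ull << 1)

    static bool has_feature(uint64_t set, uint64_t bit) { return set & bit; }

    static int validate_features(uint64_t host_feats, uint64_t guest_feats)
    {
        if (has_feature(host_feats, F_UNPLUGGED_INACCESSIBLE) &&
            !has_feature(guest_feats, F_UNPLUGGED_INACCESSIBLE)) {
            return -EFAULT;   /* guest too old to honor the restriction */
        }
        return 0;
    }

    int main(void)
    {
        return validate_features(F_UNPLUGGED_INACCESSIBLE, 0) == -EFAULT ? 0 : 1;
    }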
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index e11a8a0dba..5d18868d7d 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -885,6 +885,7 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
if (vq->used_idx >= vq->vring.num) {
vq->used_idx -= vq->vring.num;
vq->used_wrap_counter ^= 1;
+ vq->signalled_used_valid = false;
}
}
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
index 08e1beec85..102a1e7f50 100644
--- a/include/disas/dis-asm.h
+++ b/include/disas/dis-asm.h
@@ -459,6 +459,7 @@ int print_insn_nios2(bfd_vma, disassemble_info*);
int print_insn_xtensa (bfd_vma, disassemble_info*);
int print_insn_riscv32 (bfd_vma, disassemble_info*);
int print_insn_riscv64 (bfd_vma, disassemble_info*);
+int print_insn_riscv128 (bfd_vma, disassemble_info*);
int print_insn_rx(bfd_vma, disassemble_info *);
int print_insn_hexagon(bfd_vma, disassemble_info *);
diff --git a/include/exec/memop.h b/include/exec/memop.h
index 04264ffd6b..2a885f3917 100644
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -85,29 +85,36 @@ typedef enum MemOp {
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
+ MO_UQ = MO_64,
+ MO_UO = MO_128,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
- MO_Q = MO_64,
+ MO_SQ = MO_SIGN | MO_64,
+ MO_SO = MO_SIGN | MO_128,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
+ MO_LEUQ = MO_LE | MO_UQ,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
- MO_LEQ = MO_LE | MO_Q,
+ MO_LESQ = MO_LE | MO_SQ,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
+ MO_BEUQ = MO_BE | MO_UQ,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
- MO_BEQ = MO_BE | MO_Q,
+ MO_BESQ = MO_BE | MO_SQ,
#ifdef NEED_CPU_H
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
+ MO_TEUQ = MO_TE | MO_UQ,
+ MO_TEUO = MO_TE | MO_UO,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
- MO_TEQ = MO_TE | MO_Q,
+ MO_TESQ = MO_TE | MO_SQ,
#endif
MO_SSIZE = MO_SIZE | MO_SIGN,
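The MemOp rename above frees the bare *Q suffix so 64-bit accesses spell out their signedness (MO_UQ/MO_SQ), with the endian-qualified forms following suit and new MO_UO/MO_SO/MO_TEUO entries covering 128-bit accesses. Purely as an illustrative cheat sheet (these compatibility defines are not part of the patch), the mechanical mapping from the removed names is:

    /* Not in the patch: mapping from old to new spellings, for reference. */
    #define MO_Q    MO_UQ       /* 64-bit unsigned                  */
    #define MO_LEQ  MO_LEUQ     /* little-endian, 64-bit unsigned   */
    #define MO_BEQ  MO_BEUQ     /* big-endian, 64-bit unsigned      */
    #define MO_TEQ  MO_TEUQ     /* target-endian, 64-bit unsigned   */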
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 8139358549..18fb7eed46 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -139,6 +139,8 @@ enum {
ASPEED_DEV_EMMC,
ASPEED_DEV_KCS,
ASPEED_DEV_HACE,
+ ASPEED_DEV_DPMCU,
+ ASPEED_DEV_DP,
};
#endif /* ASPEED_SOC_H */
diff --git a/include/hw/boards.h b/include/hw/boards.h
index f49a2578ea..c92ac8815c 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -380,6 +380,9 @@ struct MachineState {
} \
type_init(machine_initfn##_register_types)
+extern GlobalProperty hw_compat_6_2[];
+extern const size_t hw_compat_6_2_len;
+
extern GlobalProperty hw_compat_6_1[];
extern const size_t hw_compat_6_1_len;
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index e948e81f1a..76ab3b851c 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -413,6 +413,9 @@ struct CPUState {
bool ignore_memory_transaction_failures;
+ /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
+ bool prctl_unalign_sigbus;
+
struct hax_vcpu_state *hax_vcpu;
struct hvf_vcpu_state *hvf;
diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h
index 5a0dd0c8cf..4b7ad77a44 100644
--- a/include/hw/firmware/smbios.h
+++ b/include/hw/firmware/smbios.h
@@ -1,6 +1,8 @@
#ifndef QEMU_SMBIOS_H
#define QEMU_SMBIOS_H
+#include "qapi/qapi-types-machine.h"
+
/*
* SMBIOS Support
*
@@ -23,14 +25,6 @@ struct smbios_phys_mem_area {
uint64_t length;
};
-/*
- * SMBIOS spec defined tables
- */
-typedef enum SmbiosEntryPointType {
- SMBIOS_ENTRY_POINT_21,
- SMBIOS_ENTRY_POINT_30,
-} SmbiosEntryPointType;
-
/* SMBIOS Entry Point
* There are two types of entry points defined in the SMBIOS specification
* (see below). BIOS must place the entry point(s) at a 16-byte-aligned
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 9ab39e428f..9c9f4ac748 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -13,6 +13,7 @@
#include "hw/hotplug.h"
#include "qom/object.h"
#include "hw/i386/sgx-epc.h"
+#include "hw/firmware/smbios.h"
#define HPET_INTCAP "hpet-intcap"
@@ -40,6 +41,7 @@ typedef struct PCMachineState {
/* Configuration options: */
uint64_t max_ram_below_4g;
OnOffAuto vmport;
+ SmbiosEntryPointType smbios_entry_point_type;
bool acpi_build_enabled;
bool smbus_enabled;
@@ -63,6 +65,8 @@ typedef struct PCMachineState {
#define PC_MACHINE_SATA "sata"
#define PC_MACHINE_PIT "pit"
#define PC_MACHINE_MAX_FW_SIZE "max-fw-size"
+#define PC_MACHINE_SMBIOS_EP "smbios-entry-point-type"
+
/**
* PCMachineClass:
*
@@ -196,6 +200,9 @@ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
/* sgx.c */
void pc_machine_init_sgx_epc(PCMachineState *pcms);
+extern GlobalProperty pc_compat_6_2[];
+extern const size_t pc_compat_6_2_len;
+
extern GlobalProperty pc_compat_6_1[];
extern const size_t pc_compat_6_1_len;
diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h
index 4e79145dde..b32c697207 100644
--- a/include/hw/intc/arm_gicv3_its_common.h
+++ b/include/hw/intc/arm_gicv3_its_common.h
@@ -46,17 +46,14 @@ typedef struct {
bool indirect;
uint16_t entry_sz;
uint32_t page_sz;
- uint32_t max_entries;
- union {
- uint32_t max_devids;
- uint32_t max_collids;
- } maxids;
+ uint32_t num_entries;
+ uint32_t num_ids;
uint64_t base_addr;
} TableDesc;
typedef struct {
bool valid;
- uint32_t max_entries;
+ uint32_t num_entries;
uint64_t base_addr;
} CmdQDesc;
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index 5b36334a28..483d5c7c72 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -735,6 +735,11 @@ void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev);
qemu_irq pci_allocate_irq(PCIDevice *pci_dev);
void pci_set_irq(PCIDevice *pci_dev, int level);
+static inline int pci_intx(PCIDevice *pci_dev)
+{
+ return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
+}
+
static inline void pci_irq_assert(PCIDevice *pci_dev)
{
pci_set_irq(pci_dev, 1);
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
index b8ef99f348..6e9f61ccd9 100644
--- a/include/hw/riscv/virt.h
+++ b/include/hw/riscv/virt.h
@@ -24,7 +24,7 @@
#include "hw/block/flash.h"
#include "qom/object.h"
-#define VIRT_CPUS_MAX 8
+#define VIRT_CPUS_MAX 32
#define VIRT_SOCKETS_MAX 8
#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h
index a5dd6a493b..7745cfc1a3 100644
--- a/include/hw/virtio/virtio-mem.h
+++ b/include/hw/virtio/virtio-mem.h
@@ -30,6 +30,8 @@ OBJECT_DECLARE_TYPE(VirtIOMEM, VirtIOMEMClass,
#define VIRTIO_MEM_REQUESTED_SIZE_PROP "requested-size"
#define VIRTIO_MEM_BLOCK_SIZE_PROP "block-size"
#define VIRTIO_MEM_ADDR_PROP "memaddr"
+#define VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP "unplugged-inaccessible"
+#define VIRTIO_MEM_PREALLOC_PROP "prealloc"
struct VirtIOMEM {
VirtIODevice parent_obj;
@@ -62,6 +64,16 @@ struct VirtIOMEM {
/* block size and alignment */
uint64_t block_size;
+ /*
+ * Whether we indicate VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE to the guest.
+ * For !x86 targets this will always be "on" and consequently indicate
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE.
+ */
+ OnOffAuto unplugged_inaccessible;
+
+ /* whether to prealloc memory when plugging new blocks */
+ bool prealloc;
+
/* notifiers to notify when "size" changes */
NotifierList size_change_notifiers;
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index b6d517aea4..2c4064256c 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -172,6 +172,26 @@ static inline Int128 bswap128(Int128 a)
#endif
}
+static inline Int128 int128_divu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a / (__uint128_t)b;
+}
+
+static inline Int128 int128_remu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a % (__uint128_t)b;
+}
+
+static inline Int128 int128_divs(Int128 a, Int128 b)
+{
+ return a / b;
+}
+
+static inline Int128 int128_rems(Int128 a, Int128 b)
+{
+ return a % b;
+}
+
#else /* !CONFIG_INT128 */
typedef struct Int128 Int128;
@@ -379,6 +399,11 @@ static inline Int128 bswap128(Int128 a)
return int128_make128(bswap64(a.hi), bswap64(a.lo));
}
+Int128 int128_divu(Int128, Int128);
+Int128 int128_remu(Int128, Int128);
+Int128 int128_divs(Int128, Int128);
+Int128 int128_rems(Int128, Int128);
+
#endif /* CONFIG_INT128 */
static inline void bswap128s(Int128 *s)
@@ -386,4 +411,6 @@ static inline void bswap128s(Int128 *s)
*s = bswap128(*s);
}
+#define UINT128_MAX int128_make128(~0LL, ~0LL)
+
#endif /* INT128_H */
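int128.h grows divide/remainder helpers plus UINT128_MAX; with CONFIG_INT128 they map straight onto the compiler's native 128-bit division, and otherwise out-of-line implementations are declared. A quick stand-alone taste of the native path, using demo types rather than the QEMU wrappers:

    #include <stdio.h>

    typedef __int128 i128;

    static i128 divu128(i128 a, i128 b)
    {
        return (unsigned __int128)a / (unsigned __int128)b;
    }

    static i128 remu128(i128 a, i128 b)
    {
        return (unsigned __int128)a % (unsigned __int128)b;
    }

    int main(void)
    {
        i128 q = divu128((i128)1 << 100, 7);
        i128 r = remu128((i128)1 << 100, 7);
        printf("low 64 bits: q=%llu r=%llu\n",
               (unsigned long long)q, (unsigned long long)r);
        return 0;
    }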
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 60718fc342..d1660d67fa 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -471,6 +471,11 @@ static inline void qemu_cleanup_generic_vfree(void *p)
#else
#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
#endif
+#ifdef MADV_POPULATE_WRITE
+#define QEMU_MADV_POPULATE_WRITE MADV_POPULATE_WRITE
+#else
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+#endif
#elif defined(CONFIG_POSIX_MADVISE)
@@ -484,6 +489,7 @@ static inline void qemu_cleanup_generic_vfree(void *p)
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
#else /* no-op */
@@ -497,6 +503,7 @@ static inline void qemu_cleanup_generic_vfree(void *p)
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
#endif
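osdep.h wires MADV_POPULATE_WRITE into the existing QEMU_MADV_* scheme: use the kernel value when the host headers provide it, otherwise fall back to an "invalid" sentinel so callers can detect the missing support and prefault pages by hand. The pattern, reduced to a stand-alone sketch with demo names:

    #include <stddef.h>
    #include <sys/mman.h>

    #define DEMO_MADV_INVALID (-1)

    #ifdef MADV_POPULATE_WRITE
    #define DEMO_MADV_POPULATE_WRITE MADV_POPULATE_WRITE
    #else
    #define DEMO_MADV_POPULATE_WRITE DEMO_MADV_INVALID
    #endif

    int prefault(void *addr, size_t len)
    {
        if (DEMO_MADV_POPULATE_WRITE == DEMO_MADV_INVALID) {
            return -1;          /* caller falls back to touching each page */
        }
        return madvise(addr, len, DEMO_MADV_POPULATE_WRITE);
    }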
diff --git a/include/standard-headers/linux/virtio_mem.h b/include/standard-headers/linux/virtio_mem.h
index 05e5ade75d..18c74c527c 100644
--- a/include/standard-headers/linux/virtio_mem.h
+++ b/include/standard-headers/linux/virtio_mem.h
@@ -68,9 +68,10 @@
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
- * read/written. Such memory should, in general, not be touched. E.g.,
- * even writing might succeed, but the values will simply be discarded at
- * random points in time.
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
*
* It can happen that the device cannot process a request, because it is
* busy. The device driver has to retry later.
@@ -87,6 +88,8 @@
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* --- virtio-mem: guest -> host requests --- */
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 8fae667172..b9421e03ff 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -16,7 +16,6 @@ extern bool qemu_uuid_set;
void qemu_add_exit_notifier(Notifier *notify);
void qemu_remove_exit_notifier(Notifier *notify);
-void qemu_run_machine_init_done_notifiers(void);
void qemu_add_machine_init_done_notifier(Notifier *notify);
void qemu_remove_machine_init_done_notifier(Notifier *notify);
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 0545a6224c..caa0a63612 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -894,7 +894,7 @@ static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
}
static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
@@ -914,7 +914,7 @@ static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
- tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
}
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
new file mode 100644
index 0000000000..3f5a5d3933
--- /dev/null
+++ b/linux-user/aarch64/target_prctl.h
@@ -0,0 +1,160 @@
+/*
+ * AArch64 specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef AARCH64_TARGET_PRCTL_H
+#define AARCH64_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_vl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ return ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_get_vl do_prctl_get_vl
+
+static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
+{
+ /*
+ * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
+ * i.e. VL=8192, even though the current architectural maximum is VQ=16.
+ */
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t vq, old_vq;
+
+ old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
+ vq = MAX(arg2 / 16, 1);
+ vq = MIN(vq, cpu->sve_max_vq);
+
+ if (vq < old_vq) {
+ aarch64_sve_narrow_vq(env, vq);
+ }
+ env->vfp.zcr_el[1] = vq - 1;
+ arm_rebuild_hflags(env);
+ return vq * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_set_vl do_prctl_set_vl
+
+static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+ PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
+ int ret = 0;
+ Error *err = NULL;
+
+ if (arg2 == 0) {
+ arg2 = all;
+ } else if (arg2 & ~all) {
+ return -TARGET_EINVAL;
+ }
+ if (arg2 & PR_PAC_APIAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apia,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APIBKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apib,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APDAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apda,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APDBKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apdb,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APGAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apga,
+ sizeof(ARMPACKey), &err);
+ }
+ if (ret != 0) {
+ /*
+ * Some unknown failure in the crypto. The best
+ * we can do is log it and fail the syscall.
+ * The real syscall cannot fail this way.
+ */
+ qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -TARGET_EIO;
+ }
+ return 0;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_reset_keys do_prctl_reset_keys
+
+static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
+{
+ abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= PR_MTE_TCF_MASK;
+ valid_mask |= PR_MTE_TAG_MASK;
+ }
+
+ if (arg2 & ~valid_mask) {
+ return -TARGET_EINVAL;
+ }
+ env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;
+
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ switch (arg2 & PR_MTE_TCF_MASK) {
+ case PR_MTE_TCF_NONE:
+ case PR_MTE_TCF_SYNC:
+ case PR_MTE_TCF_ASYNC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
+ * Note that the syscall values are consistent with hw.
+ */
+ env->cp15.sctlr_el[1] =
+ deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT);
+
+ /*
+ * Write PR_MTE_TAG to GCR_EL1[Exclude].
+ * Note that the syscall uses an include mask,
+ * and hardware uses an exclude mask -- invert.
+ */
+ env->cp15.gcr_el1 =
+ deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
+ arm_rebuild_hflags(env);
+ }
+ return 0;
+}
+#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
+
+static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ abi_long ret = 0;
+
+ if (env->tagged_addr_enable) {
+ ret |= PR_TAGGED_ADDR_ENABLE;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ /* See do_prctl_set_tagged_addr_ctrl. */
+ ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
+ ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
+ }
+ return ret;
+}
+#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
+
+#endif /* AARCH64_TARGET_PRCTL_H */
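For context, the prctls emulated by this new header are the ones an aarch64 guest process issues itself. A guest-side illustration (not QEMU code) of querying and setting the SVE vector length; the PR_SVE_* values match the TARGET_PR_* constants removed from target_syscall.h below:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SVE_SET_VL
    #define PR_SVE_SET_VL 50
    #define PR_SVE_GET_VL 51
    #endif

    int main(void)
    {
        long vl = prctl(PR_SVE_GET_VL);
        if (vl < 0) {
            perror("PR_SVE_GET_VL");            /* SVE not supported */
            return 1;
        }
        printf("current VL: %ld bytes\n", vl & 0xffff);
        /* Request a 32-byte VL; the kernel clamps to a supported value. */
        if (prctl(PR_SVE_SET_VL, 32L) < 0) {
            perror("PR_SVE_SET_VL");
        }
        return 0;
    }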
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index 7580d99403..40e399d990 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -1,24 +1,6 @@
#ifndef AARCH64_TARGET_SIGNAL_H
#define AARCH64_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
index 76f6c3391d..a98f568ab4 100644
--- a/linux-user/aarch64/target_syscall.h
+++ b/linux-user/aarch64/target_syscall.h
@@ -15,32 +15,8 @@ struct target_pt_regs {
#endif
#define UNAME_MINIMUM_RELEASE "3.8.0"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
-#define TARGET_PR_SVE_SET_VL 50
-#define TARGET_PR_SVE_GET_VL 51
-
-#define TARGET_PR_PAC_RESET_KEYS 54
-# define TARGET_PR_PAC_APIAKEY (1 << 0)
-# define TARGET_PR_PAC_APIBKEY (1 << 1)
-# define TARGET_PR_PAC_APDAKEY (1 << 2)
-# define TARGET_PR_PAC_APDBKEY (1 << 3)
-# define TARGET_PR_PAC_APGAKEY (1 << 4)
-
-#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
-#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
-# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
-/* MTE tag check fault modes */
-# define TARGET_PR_MTE_TCF_SHIFT 1
-# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT)
-/* MTE tag inclusion mask */
-# define TARGET_PR_MTE_TAG_SHIFT 3
-# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT)
-
#endif /* AARCH64_TARGET_SYSCALL_H */
diff --git a/linux-user/alpha/target_prctl.h b/linux-user/alpha/target_prctl.h
new file mode 100644
index 0000000000..5629ddbf39
--- /dev/null
+++ b/linux-user/alpha/target_prctl.h
@@ -0,0 +1 @@
+#include "../generic/target_prctl_unalign.h"
diff --git a/linux-user/alpha/target_signal.h b/linux-user/alpha/target_signal.h
index 0b6a39de65..bbb06e5463 100644
--- a/linux-user/alpha/target_signal.h
+++ b/linux-user/alpha/target_signal.h
@@ -62,7 +62,6 @@ typedef struct target_sigaltstack {
#define TARGET_SA_SIGINFO 0x00000040
#define TARGET_MINSIGSTKSZ 4096
-#define TARGET_SIGSTKSZ 16384
/* From <asm/gentrap.h>. */
#define TARGET_GEN_INTOVF -1 /* integer overflow */
diff --git a/linux-user/alpha/target_syscall.h b/linux-user/alpha/target_syscall.h
index 03091bf0a8..fda3a49f29 100644
--- a/linux-user/alpha/target_syscall.h
+++ b/linux-user/alpha/target_syscall.h
@@ -63,7 +63,6 @@ struct target_pt_regs {
#define TARGET_UAC_NOPRINT 1
#define TARGET_UAC_NOFIX 2
#define TARGET_UAC_SIGBUS 4
-#define TARGET_MINSIGSTKSZ 4096
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
diff --git a/linux-user/arm/target_prctl.h b/linux-user/arm/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/arm/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h
index 1e7fb0cecb..0e6351d9f7 100644
--- a/linux-user/arm/target_signal.h
+++ b/linux-user/arm/target_signal.h
@@ -1,24 +1,6 @@
#ifndef ARM_TARGET_SIGNAL_H
#define ARM_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/arm/target_syscall.h b/linux-user/arm/target_syscall.h
index e870ed7a54..f04f9c9e3d 100644
--- a/linux-user/arm/target_syscall.h
+++ b/linux-user/arm/target_syscall.h
@@ -27,7 +27,6 @@ struct target_pt_regs {
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/cris/target_prctl.h b/linux-user/cris/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/cris/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/cris/target_signal.h b/linux-user/cris/target_signal.h
index 83a5155507..ab0653fcdc 100644
--- a/linux-user/cris/target_signal.h
+++ b/linux-user/cris/target_signal.h
@@ -1,24 +1,6 @@
#ifndef CRIS_TARGET_SIGNAL_H
#define CRIS_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/cris/target_syscall.h b/linux-user/cris/target_syscall.h
index 19e1281403..0b5ebf1f02 100644
--- a/linux-user/cris/target_syscall.h
+++ b/linux-user/cris/target_syscall.h
@@ -39,7 +39,6 @@ struct target_pt_regs {
};
#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 767f54c76d..329b2375ef 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -390,11 +390,11 @@ enum {
/* The commpage only exists for 32 bit kernels */
-#define ARM_COMMPAGE (intptr_t)0xffff0f00u
+#define HI_COMMPAGE (intptr_t)0xffff0f00u
static bool init_guest_commpage(void)
{
- void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size);
+ void *want = g2h_untagged(HI_COMMPAGE & -qemu_host_page_size);
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
@@ -1099,6 +1099,47 @@ static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->estatus = 0x3;
}
+#define LO_COMMPAGE TARGET_PAGE_SIZE
+
+static bool init_guest_commpage(void)
+{
+ static const uint8_t kuser_page[4 + 2 * 64] = {
+ /* __kuser_helper_version */
+ [0x00] = 0x02, 0x00, 0x00, 0x00,
+
+ /* __kuser_cmpxchg */
+ [0x04] = 0x3a, 0x6c, 0x3b, 0x00, /* trap 16 */
+ 0x3a, 0x28, 0x00, 0xf8, /* ret */
+
+ /* __kuser_sigtramp */
+ [0x44] = 0xc4, 0x22, 0x80, 0x00, /* movi r2, __NR_rt_sigreturn */
+ 0x3a, 0x68, 0x3b, 0x00, /* trap 0 */
+ };
+
+ void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size);
+ void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+
+ if (addr == MAP_FAILED) {
+ perror("Allocating guest commpage");
+ exit(EXIT_FAILURE);
+ }
+ if (addr != want) {
+ return false;
+ }
+
+ memcpy(addr, kuser_page, sizeof(kuser_page));
+
+ if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
+ perror("Protecting guest commpage");
+ exit(EXIT_FAILURE);
+ }
+
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
+ return true;
+}
+
#define ELF_EXEC_PAGESIZE 4096
#define USE_ELF_CORE_DUMP
@@ -2160,8 +2201,13 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
return sp;
}
-#ifndef ARM_COMMPAGE
-#define ARM_COMMPAGE 0
+#if defined(HI_COMMPAGE)
+#define LO_COMMPAGE 0
+#elif defined(LO_COMMPAGE)
+#define HI_COMMPAGE 0
+#else
+#define HI_COMMPAGE 0
+#define LO_COMMPAGE 0
#define init_guest_commpage() true
#endif
@@ -2361,7 +2407,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
}
loaddr &= -align;
- if (ARM_COMMPAGE) {
+ if (HI_COMMPAGE) {
/*
* Extend the allocation to include the commpage.
* For a 64-bit host, this is just 4GiB; for a 32-bit host we
@@ -2372,14 +2418,16 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
hiaddr = (uintptr_t) 4 << 30;
} else {
- offset = -(ARM_COMMPAGE & -align);
+ offset = -(HI_COMMPAGE & -align);
}
+ } else if (LO_COMMPAGE) {
+ loaddr = MIN(loaddr, LO_COMMPAGE & -align);
}
addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
if (addr == -1) {
/*
- * If ARM_COMMPAGE, there *might* be a non-consecutive allocation
+ * If HI_COMMPAGE, there *might* be a non-consecutive allocation
* that can satisfy both. But as the normal arm32 link base address
* is ~32k, and we extend down to include the commpage, making the
* overhead only ~96k, this is unlikely.
@@ -2400,7 +2448,7 @@ static void pgb_dynamic(const char *image_name, long align)
* All we need is a commpage that satisfies align.
* If we do not need a commpage, leave guest_base == 0.
*/
- if (ARM_COMMPAGE) {
+ if (HI_COMMPAGE) {
uintptr_t addr, commpage;
/* 64-bit hosts should have used reserved_va. */
@@ -2410,7 +2458,7 @@ static void pgb_dynamic(const char *image_name, long align)
* By putting the commpage at the first hole, that puts guest_base
* just above that, and maximises the positive guest addresses.
*/
- commpage = ARM_COMMPAGE & -align;
+ commpage = HI_COMMPAGE & -align;
addr = pgb_find_hole(commpage, -commpage, align, 0);
assert(addr != -1);
guest_base = addr;
diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c
index 6941089959..a17d05c079 100644
--- a/linux-user/fd-trans.c
+++ b/linux-user/fd-trans.c
@@ -138,6 +138,9 @@ enum {
QEMU_IFLA_PROP_LIST,
QEMU_IFLA_ALT_IFNAME,
QEMU_IFLA_PERM_ADDRESS,
+ QEMU_IFLA_PROTO_DOWN_REASON,
+ QEMU_IFLA_PARENT_DEV_NAME,
+ QEMU_IFLA_PARENT_DEV_BUS_NAME,
QEMU___IFLA_MAX
};
@@ -179,6 +182,8 @@ enum {
QEMU_IFLA_BRPORT_BACKUP_PORT,
QEMU_IFLA_BRPORT_MRP_RING_OPEN,
QEMU_IFLA_BRPORT_MRP_IN_OPEN,
+ QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
QEMU___IFLA_BRPORT_MAX
};
@@ -268,6 +273,37 @@ enum {
QEMU___RTA_MAX
};
+enum {
+ QEMU_IFLA_VF_STATS_RX_PACKETS,
+ QEMU_IFLA_VF_STATS_TX_PACKETS,
+ QEMU_IFLA_VF_STATS_RX_BYTES,
+ QEMU_IFLA_VF_STATS_TX_BYTES,
+ QEMU_IFLA_VF_STATS_BROADCAST,
+ QEMU_IFLA_VF_STATS_MULTICAST,
+ QEMU_IFLA_VF_STATS_PAD,
+ QEMU_IFLA_VF_STATS_RX_DROPPED,
+ QEMU_IFLA_VF_STATS_TX_DROPPED,
+ QEMU__IFLA_VF_STATS_MAX,
+};
+
+enum {
+ QEMU_IFLA_VF_UNSPEC,
+ QEMU_IFLA_VF_MAC,
+ QEMU_IFLA_VF_VLAN,
+ QEMU_IFLA_VF_TX_RATE,
+ QEMU_IFLA_VF_SPOOFCHK,
+ QEMU_IFLA_VF_LINK_STATE,
+ QEMU_IFLA_VF_RATE,
+ QEMU_IFLA_VF_RSS_QUERY_EN,
+ QEMU_IFLA_VF_STATS,
+ QEMU_IFLA_VF_TRUST,
+ QEMU_IFLA_VF_IB_NODE_GUID,
+ QEMU_IFLA_VF_IB_PORT_GUID,
+ QEMU_IFLA_VF_VLAN_LIST,
+ QEMU_IFLA_VF_BROADCAST,
+ QEMU__IFLA_VF_MAX,
+};
+
TargetFdTrans **target_fd_trans;
QemuMutex target_fd_trans_lock;
unsigned int target_fd_max;
@@ -573,6 +609,8 @@ static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
/* uint32_t */
case QEMU_IFLA_BRPORT_COST:
case QEMU_IFLA_BRPORT_BACKUP_PORT:
+ case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT:
+ case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT:
u32 = NLA_DATA(nlattr);
*u32 = tswap32(*u32);
break;
@@ -805,6 +843,145 @@ static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
return 0;
}
+static abi_long host_to_target_data_vlan_list_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct ifla_vf_vlan_info *vlan_info;
+
+ switch (nlattr->nla_type) {
+ /* struct ifla_vf_vlan_info */
+ case IFLA_VF_VLAN_INFO:
+ vlan_info = NLA_DATA(nlattr);
+ vlan_info->vf = tswap32(vlan_info->vf);
+ vlan_info->vlan = tswap32(vlan_info->vlan);
+ vlan_info->qos = tswap32(vlan_info->qos);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VLAN LIST type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_vf_stats_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint64_t *u64;
+
+ switch (nlattr->nla_type) {
+ /* uint64_t */
+ case QEMU_IFLA_VF_STATS_RX_PACKETS:
+ case QEMU_IFLA_VF_STATS_TX_PACKETS:
+ case QEMU_IFLA_VF_STATS_RX_BYTES:
+ case QEMU_IFLA_VF_STATS_TX_BYTES:
+ case QEMU_IFLA_VF_STATS_BROADCAST:
+ case QEMU_IFLA_VF_STATS_MULTICAST:
+ case QEMU_IFLA_VF_STATS_PAD:
+ case QEMU_IFLA_VF_STATS_RX_DROPPED:
+ case QEMU_IFLA_VF_STATS_TX_DROPPED:
+ u64 = NLA_DATA(nlattr);
+ *u64 = tswap64(*u64);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VF STATS type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_vfinfo_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct ifla_vf_mac *mac;
+ struct ifla_vf_vlan *vlan;
+ struct ifla_vf_vlan_info *vlan_info;
+ struct ifla_vf_spoofchk *spoofchk;
+ struct ifla_vf_rate *rate;
+ struct ifla_vf_link_state *link_state;
+ struct ifla_vf_rss_query_en *rss_query_en;
+ struct ifla_vf_trust *trust;
+ struct ifla_vf_guid *guid;
+
+ switch (nlattr->nla_type) {
+ /* struct ifla_vf_mac */
+ case QEMU_IFLA_VF_MAC:
+ mac = NLA_DATA(nlattr);
+ mac->vf = tswap32(mac->vf);
+ break;
+ /* struct ifla_vf_broadcast */
+ case QEMU_IFLA_VF_BROADCAST:
+ break;
+ /* struct ifla_vf_vlan */
+ case QEMU_IFLA_VF_VLAN:
+ vlan = NLA_DATA(nlattr);
+ vlan->vf = tswap32(vlan->vf);
+ vlan->vlan = tswap32(vlan->vlan);
+ vlan->qos = tswap32(vlan->qos);
+ break;
+ /* struct ifla_vf_vlan_info */
+ case QEMU_IFLA_VF_TX_RATE:
+ vlan_info = NLA_DATA(nlattr);
+ vlan_info->vf = tswap32(vlan_info->vf);
+ vlan_info->vlan = tswap32(vlan_info->vlan);
+ vlan_info->qos = tswap32(vlan_info->qos);
+ break;
+ /* struct ifla_vf_spoofchk */
+ case QEMU_IFLA_VF_SPOOFCHK:
+ spoofchk = NLA_DATA(nlattr);
+ spoofchk->vf = tswap32(spoofchk->vf);
+ spoofchk->setting = tswap32(spoofchk->setting);
+ break;
+ /* struct ifla_vf_rate */
+ case QEMU_IFLA_VF_RATE:
+ rate = NLA_DATA(nlattr);
+ rate->vf = tswap32(rate->vf);
+ rate->min_tx_rate = tswap32(rate->min_tx_rate);
+ rate->max_tx_rate = tswap32(rate->max_tx_rate);
+ break;
+ /* struct ifla_vf_link_state */
+ case QEMU_IFLA_VF_LINK_STATE:
+ link_state = NLA_DATA(nlattr);
+ link_state->vf = tswap32(link_state->vf);
+ link_state->link_state = tswap32(link_state->link_state);
+ break;
+ /* struct ifla_vf_rss_query_en */
+ case QEMU_IFLA_VF_RSS_QUERY_EN:
+ rss_query_en = NLA_DATA(nlattr);
+ rss_query_en->vf = tswap32(rss_query_en->vf);
+ rss_query_en->setting = tswap32(rss_query_en->setting);
+ break;
+ /* struct ifla_vf_trust */
+ case QEMU_IFLA_VF_TRUST:
+ trust = NLA_DATA(nlattr);
+ trust->vf = tswap32(trust->vf);
+ trust->setting = tswap32(trust->setting);
+ break;
+ /* struct ifla_vf_guid */
+ case QEMU_IFLA_VF_IB_NODE_GUID:
+ case QEMU_IFLA_VF_IB_PORT_GUID:
+ guid = NLA_DATA(nlattr);
+ guid->vf = tswap32(guid->vf);
+ guid->guid = tswap32(guid->guid);
+ break;
+ /* nested */
+ case QEMU_IFLA_VF_VLAN_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_vlan_list_nlattr);
+ case QEMU_IFLA_VF_STATS:
+ return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_vf_stats_nlattr);
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VFINFO type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
@@ -818,9 +995,12 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
case QEMU_IFLA_ADDRESS:
case QEMU_IFLA_BROADCAST:
case QEMU_IFLA_PERM_ADDRESS:
+ case QEMU_IFLA_PHYS_PORT_ID:
/* string */
case QEMU_IFLA_IFNAME:
case QEMU_IFLA_QDISC:
+ case QEMU_IFLA_PARENT_DEV_NAME:
+ case QEMU_IFLA_PARENT_DEV_BUS_NAME:
break;
/* uin8_t */
case QEMU_IFLA_OPERSTATE:
@@ -939,6 +1119,10 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
NULL,
host_to_target_data_xdp_nlattr);
+ case QEMU_IFLA_VFINFO_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ NULL,
+ host_to_target_data_vfinfo_nlattr);
default:
qemu_log_mask(LOG_UNIMP, "Unknown host QEMU_IFLA type: %d\n",
rtattr->rta_type);
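The vfinfo hunks above walk nested netlink attributes (IFLA_VFINFO_LIST wraps per-VF attributes, which in turn wrap VF_STATS and VF_VLAN_LIST) and byte-swap each fixed-size payload in place. Below is a minimal stand-alone sketch of that walk-and-swap pattern; it assumes the kernel's struct nlattr layout and 4-byte attribute padding, and its names are illustrative only, not QEMU's NLA_DATA/tswap API.

/* Illustrative sketch, not part of the patch: walk one level of netlink
 * attributes and swap 32-bit payloads in place.  The header layout mirrors
 * struct nlattr from <linux/netlink.h>. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct nl_attr {
    uint16_t nla_len;            /* header + payload length */
    uint16_t nla_type;
};

#define NL_ALIGN(len) (((len) + 3u) & ~3u)

static void swap_u32_attrs(void *buf, size_t len)
{
    char *p = buf;

    while (len >= sizeof(struct nl_attr)) {
        struct nl_attr *a = (struct nl_attr *)p;
        size_t step;

        if (a->nla_len < sizeof(*a) || a->nla_len > len) {
            break;                             /* malformed attribute */
        }
        if (a->nla_len - sizeof(*a) == sizeof(uint32_t)) {
            uint32_t v;
            memcpy(&v, p + sizeof(*a), sizeof(v));
            v = __builtin_bswap32(v);          /* fix payload endianness */
            memcpy(p + sizeof(*a), &v, sizeof(v));
        }
        step = NL_ALIGN(a->nla_len);           /* attributes pad to 4 bytes */
        if (step >= len) {
            break;
        }
        p += step;
        len -= step;
    }
}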
diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h
index 943bc1a1e2..6fd05b77bb 100644
--- a/linux-user/generic/signal.h
+++ b/linux-user/generic/signal.h
@@ -55,6 +55,21 @@
#define TARGET_SIG_UNBLOCK 1 /* for unblocking signals */
#define TARGET_SIG_SETMASK 2 /* for setting the signal mask */
+/* this struct defines a stack used during syscall handling */
+typedef struct target_sigaltstack {
+ abi_ulong ss_sp;
+ abi_int ss_flags;
+ abi_ulong ss_size;
+} target_stack_t;
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
/* mask for all SS_xxx flags */
diff --git a/linux-user/generic/target_prctl_unalign.h b/linux-user/generic/target_prctl_unalign.h
new file mode 100644
index 0000000000..bc3b83af2a
--- /dev/null
+++ b/linux-user/generic/target_prctl_unalign.h
@@ -0,0 +1,27 @@
+/*
+ * Generic prctl unalign functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef GENERIC_TARGET_PRCTL_UNALIGN_H
+#define GENERIC_TARGET_PRCTL_UNALIGN_H
+
+static abi_long do_prctl_get_unalign(CPUArchState *env, target_long arg2)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t res = PR_UNALIGN_NOPRINT;
+ if (cs->prctl_unalign_sigbus) {
+ res |= PR_UNALIGN_SIGBUS;
+ }
+ return put_user_u32(res, arg2);
+}
+#define do_prctl_get_unalign do_prctl_get_unalign
+
+static abi_long do_prctl_set_unalign(CPUArchState *env, target_long arg2)
+{
+ env_cpu(env)->prctl_unalign_sigbus = arg2 & PR_UNALIGN_SIGBUS;
+ return 0;
+}
+#define do_prctl_set_unalign do_prctl_set_unalign
+
+#endif /* GENERIC_TARGET_PRCTL_UNALIGN_H */
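Each helper above defines itself and then #defines its own name, so the common prctl code (see the do_prctl hunk in linux-user/syscall.c further down) can substitute an EINVAL stub whenever an architecture header supplies nothing. A minimal sketch of that define-or-fallback pattern, using made-up names rather than QEMU symbols:

/* Illustrative sketch only: hook_get_unalign / hook_inval stand in for
 * do_prctl_get_unalign / do_prctl_inval1. */

/* An architecture header provides the hook and advertises it: */
static long hook_get_unalign(int arg)
{
    return arg & 1;                  /* arch-specific behaviour */
}
#define hook_get_unalign hook_get_unalign

/* Common code supplies a fallback only when no header defined the hook: */
static long hook_inval(int arg)
{
    (void)arg;
    return -22;                      /* stands in for -TARGET_EINVAL */
}
#ifndef hook_get_unalign
#define hook_get_unalign hook_inval
#endif

static long dispatch_option(int option, int arg)
{
    switch (option) {
    case 5:                          /* illustrative option number */
        return hook_get_unalign(arg);
    default:
        return hook_inval(arg);
    }
}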
diff --git a/linux-user/hexagon/target_prctl.h b/linux-user/hexagon/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/hexagon/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/hexagon/target_signal.h b/linux-user/hexagon/target_signal.h
index 9e0223d322..193abac340 100644
--- a/linux-user/hexagon/target_signal.h
+++ b/linux-user/hexagon/target_signal.h
@@ -18,17 +18,6 @@
#ifndef HEXAGON_TARGET_SIGNAL_H
#define HEXAGON_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/host/loongarch64/host-signal.h b/linux-user/host/loongarch64/host-signal.h
index 05e2c82371..7effa24251 100644
--- a/linux-user/host/loongarch64/host-signal.h
+++ b/linux-user/host/loongarch64/host-signal.h
@@ -54,9 +54,7 @@ static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
}
break;
case 0b001110: /* indexed, atomic, bounds-checking memory operations */
- uint32_t sel = (insn >> 15) & 0b11111111111;
-
- switch (sel) {
+ switch ((insn >> 15) & 0b11111111111) {
case 0b00000100000: /* stx.b */
case 0b00000101000: /* stx.h */
case 0b00000110000: /* stx.w */
diff --git a/linux-user/hppa/target_prctl.h b/linux-user/hppa/target_prctl.h
new file mode 100644
index 0000000000..5629ddbf39
--- /dev/null
+++ b/linux-user/hppa/target_prctl.h
@@ -0,0 +1 @@
+#include "../generic/target_prctl_unalign.h"
diff --git a/linux-user/hppa/target_signal.h b/linux-user/hppa/target_signal.h
index d558119ee7..af6c2fce58 100644
--- a/linux-user/hppa/target_signal.h
+++ b/linux-user/hppa/target_signal.h
@@ -64,7 +64,6 @@ typedef struct target_sigaltstack {
#define TARGET_SA_NOCLDWAIT 0x00000080
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
diff --git a/linux-user/hppa/target_syscall.h b/linux-user/hppa/target_syscall.h
index 0018bcb5c4..4b382c1fcf 100644
--- a/linux-user/hppa/target_syscall.h
+++ b/linux-user/hppa/target_syscall.h
@@ -22,7 +22,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "parisc"
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/i386/target_prctl.h b/linux-user/i386/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/i386/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h
index 64d09f2e75..9315cba241 100644
--- a/linux-user/i386/target_signal.h
+++ b/linux-user/i386/target_signal.h
@@ -1,24 +1,6 @@
#ifndef I386_TARGET_SIGNAL_H
#define I386_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/i386/target_syscall.h b/linux-user/i386/target_syscall.h
index ed356b3908..aaade06b13 100644
--- a/linux-user/i386/target_syscall.h
+++ b/linux-user/i386/target_syscall.h
@@ -150,7 +150,6 @@ struct target_vm86plus_struct {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/m68k/target_prctl.h b/linux-user/m68k/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/m68k/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h
index 94157bf1f4..6e0f4b74e3 100644
--- a/linux-user/m68k/target_signal.h
+++ b/linux-user/m68k/target_signal.h
@@ -1,24 +1,6 @@
#ifndef M68K_TARGET_SIGNAL_H
#define M68K_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/m68k/target_syscall.h b/linux-user/m68k/target_syscall.h
index 23359a6299..8d4ddbd76c 100644
--- a/linux-user/m68k/target_syscall.h
+++ b/linux-user/m68k/target_syscall.h
@@ -20,7 +20,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "m68k"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/microblaze/target_prctl.h b/linux-user/microblaze/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/microblaze/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h
index e8b510f6b1..7dc5c45f00 100644
--- a/linux-user/microblaze/target_signal.h
+++ b/linux-user/microblaze/target_signal.h
@@ -1,24 +1,6 @@
#ifndef MICROBLAZE_TARGET_SIGNAL_H
#define MICROBLAZE_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/microblaze/target_syscall.h b/linux-user/microblaze/target_syscall.h
index 7f653db34f..43362a1664 100644
--- a/linux-user/microblaze/target_syscall.h
+++ b/linux-user/microblaze/target_syscall.h
@@ -49,7 +49,6 @@ struct target_pt_regs {
};
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/mips/target_prctl.h b/linux-user/mips/target_prctl.h
new file mode 100644
index 0000000000..e028333db9
--- /dev/null
+++ b/linux-user/mips/target_prctl.h
@@ -0,0 +1,88 @@
+/*
+ * MIPS specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef MIPS_TARGET_PRCTL_H
+#define MIPS_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_fp_mode(CPUArchState *env)
+{
+ abi_long ret = 0;
+
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ ret |= PR_FP_MODE_FR;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+ ret |= PR_FP_MODE_FRE;
+ }
+ return ret;
+}
+#define do_prctl_get_fp_mode do_prctl_get_fp_mode
+
+static abi_long do_prctl_set_fp_mode(CPUArchState *env, abi_long arg2)
+{
+ bool old_fr = env->CP0_Status & (1 << CP0St_FR);
+ bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
+ bool new_fr = arg2 & PR_FP_MODE_FR;
+ bool new_fre = arg2 & PR_FP_MODE_FRE;
+ const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+
+ /* If nothing to change, return right away, successfully. */
+ if (old_fr == new_fr && old_fre == new_fre) {
+ return 0;
+ }
+ /* Check the value is valid */
+ if (arg2 & ~known_bits) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ /* Setting FRE without FR is not supported. */
+ if (new_fre && !new_fr) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+ /* FR1 is not supported */
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
+ && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
+ /* cannot set FR=0 */
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
+ /* Cannot set FRE=1 */
+ return -TARGET_EOPNOTSUPP;
+ }
+
+ int i;
+ fpr_t *fpr = env->active_fpu.fpr;
+ for (i = 0; i < 32 ; i += 2) {
+ if (!old_fr && new_fr) {
+ fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
+ } else if (old_fr && !new_fr) {
+ fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
+ }
+ }
+
+ if (new_fr) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ env->hflags |= MIPS_HFLAG_F64;
+ } else {
+ env->CP0_Status &= ~(1 << CP0St_FR);
+ env->hflags &= ~MIPS_HFLAG_F64;
+ }
+ if (new_fre) {
+ env->CP0_Config5 |= (1 << CP0C5_FRE);
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ env->hflags |= MIPS_HFLAG_FRE;
+ }
+ } else {
+ env->CP0_Config5 &= ~(1 << CP0C5_FRE);
+ env->hflags &= ~MIPS_HFLAG_FRE;
+ }
+
+ return 0;
+}
+#define do_prctl_set_fp_mode do_prctl_set_fp_mode
+
+#endif /* MIPS_TARGET_PRCTL_H */
diff --git a/linux-user/mips/target_signal.h b/linux-user/mips/target_signal.h
index 780a4ddf29..fa542c1f4e 100644
--- a/linux-user/mips/target_signal.h
+++ b/linux-user/mips/target_signal.h
@@ -67,7 +67,6 @@ typedef struct target_sigaltstack {
#define TARGET_SA_RESTORER 0x04000000 /* Only for O32 */
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
#if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h
index f59057493a..08ead67810 100644
--- a/linux-user/mips/target_syscall.h
+++ b/linux-user/mips/target_syscall.h
@@ -24,7 +24,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
@@ -36,10 +35,4 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env)
return 0x40000;
}
-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
#endif /* MIPS_TARGET_SYSCALL_H */
diff --git a/linux-user/mips64/target_prctl.h b/linux-user/mips64/target_prctl.h
new file mode 100644
index 0000000000..18da9ae619
--- /dev/null
+++ b/linux-user/mips64/target_prctl.h
@@ -0,0 +1 @@
+#include "../mips/target_prctl.h"
diff --git a/linux-user/mips64/target_signal.h b/linux-user/mips64/target_signal.h
index 275e9b7f9a..b05098f7f6 100644
--- a/linux-user/mips64/target_signal.h
+++ b/linux-user/mips64/target_signal.h
@@ -65,7 +65,6 @@ typedef struct target_sigaltstack {
#define TARGET_SA_RESETHAND 0x80000000
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h
index cd1e1b4969..358dc2d64c 100644
--- a/linux-user/mips64/target_syscall.h
+++ b/linux-user/mips64/target_syscall.h
@@ -21,7 +21,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
@@ -33,10 +32,4 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env)
return 0x40000;
}
-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
#endif /* MIPS64_TARGET_SYSCALL_H */
diff --git a/linux-user/nios2/cpu_loop.c b/linux-user/nios2/cpu_loop.c
index 34290fb3b5..1e93ef34e6 100644
--- a/linux-user/nios2/cpu_loop.c
+++ b/linux-user/nios2/cpu_loop.c
@@ -26,7 +26,6 @@
void cpu_loop(CPUNios2State *env)
{
CPUState *cs = env_cpu(env);
- Nios2CPU *cpu = NIOS2_CPU(cs);
target_siginfo_t info;
int trapnr, ret;
@@ -39,9 +38,10 @@ void cpu_loop(CPUNios2State *env)
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
+
case EXCP_TRAP:
- if (env->regs[R_AT] == 0) {
- abi_long ret;
+ switch (env->error_code) {
+ case 0:
qemu_log_mask(CPU_LOG_INT, "\nSyscall\n");
ret = do_syscall(env, env->regs[2],
@@ -55,26 +55,56 @@ void cpu_loop(CPUNios2State *env)
env->regs[2] = abs(ret);
/* Return value is 0..4096 */
- env->regs[7] = (ret > 0xfffffffffffff000ULL);
- env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
- env->regs[CR_STATUS] &= ~0x3;
- env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[7] = ret > 0xfffff000u;
env->regs[R_PC] += 4;
break;
- } else {
- qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
- env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
- env->regs[CR_STATUS] &= ~0x3;
- env->regs[R_EA] = env->regs[R_PC] + 4;
- env->regs[R_PC] = cpu->exception_addr;
+ case 1:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap 1\n");
+ force_sig_fault(TARGET_SIGUSR1, 0, env->regs[R_PC]);
+ break;
+ case 2:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap 2\n");
+ force_sig_fault(TARGET_SIGUSR2, 0, env->regs[R_PC]);
+ break;
+ case 31:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap 31\n");
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[R_PC]);
+ break;
+ default:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap %d\n", env->error_code);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
+ env->regs[R_PC]);
+ break;
+
+ case 16: /* QEMU specific, for __kuser_cmpxchg */
+ {
+ abi_ptr g = env->regs[4];
+ uint32_t *h, n, o;
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ if (g & 0x3) {
+ force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, g);
+ break;
+ }
+ ret = page_get_flags(g);
+ if (!(ret & PAGE_VALID)) {
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, g);
+ break;
+ }
+ if (!(ret & PAGE_READ) || !(ret & PAGE_WRITE)) {
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR, g);
+ break;
+ }
+ h = g2h(cs, g);
+ o = env->regs[5];
+ n = env->regs[6];
+ env->regs[2] = qatomic_cmpxchg(h, o, n) - o;
+ env->regs[R_PC] += 4;
+ }
break;
}
+ break;
+
case EXCP_DEBUG:
info.si_signo = TARGET_SIGTRAP;
info.si_errno = 0;
@@ -82,29 +112,7 @@ void cpu_loop(CPUNios2State *env)
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
case 0xaa:
- switch (env->regs[R_PC]) {
- /*case 0x1000:*/ /* TODO:__kuser_helper_version */
- case 0x1004: /* __kuser_cmpxchg */
- start_exclusive();
- if (env->regs[4] & 0x3) {
- goto kuser_fail;
- }
- ret = get_user_u32(env->regs[2], env->regs[4]);
- if (ret) {
- end_exclusive();
- goto kuser_fail;
- }
- env->regs[2] -= env->regs[5];
- if (env->regs[2] == 0) {
- put_user_u32(env->regs[6], env->regs[4]);
- }
- end_exclusive();
- env->regs[R_PC] = env->regs[R_RA];
- break;
- /*case 0x1040:*/ /* TODO:__kuser_sigtramp */
- default:
- ;
-kuser_fail:
+ {
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* TODO: check env->error_code */
@@ -147,9 +155,6 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
env->regs[R_SP] = regs->sp;
env->regs[R_GP] = regs->gp;
env->regs[CR_ESTATUS] = regs->estatus;
- env->regs[R_EA] = regs->ea;
- /* TODO: unsigned long orig_r7; */
-
- /* Emulate eret when starting thread. */
env->regs[R_PC] = regs->ea;
+ /* TODO: unsigned long orig_r7; */
}
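The new trap 16 handler emulates a compare-and-swap for the guest and stores old_value minus expected into r2, so the guest reads 0 exactly on success. A small sketch of that return convention, assuming GCC's __atomic builtin as a stand-in for qatomic_cmpxchg:

/* Illustrative sketch only: the "old minus expected, zero on success"
 * convention stored back into r2 above. */
#include <stdint.h>

static uint32_t cmpxchg_result(uint32_t *addr, uint32_t expected, uint32_t desired)
{
    uint32_t old = expected;

    /* On failure the builtin rewrites 'old' with the value actually seen. */
    __atomic_compare_exchange_n(addr, &old, desired, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old - expected;           /* 0 on success, non-zero on mismatch */
}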
diff --git a/linux-user/nios2/signal.c b/linux-user/nios2/signal.c
index a77e8a40f4..517cd39270 100644
--- a/linux-user/nios2/signal.c
+++ b/linux-user/nios2/signal.c
@@ -42,7 +42,7 @@ struct target_rt_sigframe {
struct target_ucontext uc;
};
-static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
+static void rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
{
unsigned long *gregs = uc->tuc_mcontext.gregs;
@@ -73,10 +73,8 @@ static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
__put_user(env->regs[R_RA], &gregs[23]);
__put_user(env->regs[R_FP], &gregs[24]);
__put_user(env->regs[R_GP], &gregs[25]);
- __put_user(env->regs[R_EA], &gregs[27]);
+ __put_user(env->regs[R_PC], &gregs[27]);
__put_user(env->regs[R_SP], &gregs[28]);
-
- return 0;
}
static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
@@ -124,7 +122,7 @@ static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
__get_user(env->regs[R_GP], &gregs[25]);
/* Not really necessary no user settable bits */
__get_user(temp, &gregs[26]);
- __get_user(env->regs[R_EA], &gregs[27]);
+ __get_user(env->regs[R_PC], &gregs[27]);
__get_user(env->regs[R_RA], &gregs[23]);
__get_user(env->regs[R_SP], &gregs[28]);
@@ -135,8 +133,8 @@ static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
return 0;
}
-static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
- size_t frame_size)
+static abi_ptr get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
+ size_t frame_size)
{
unsigned long usp;
@@ -144,7 +142,7 @@ static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
usp = target_sigsp(get_sp_from_cpustate(env), ka);
/* Verify, is it 32 or 64 bit aligned */
- return (void *)((usp - frame_size) & -8UL);
+ return (usp - frame_size) & -8;
}
void setup_rt_frame(int sig, struct target_sigaction *ka,
@@ -153,26 +151,24 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
CPUNios2State *env)
{
struct target_rt_sigframe *frame;
- int i, err = 0;
+ abi_ptr frame_addr;
+ int i;
- frame = get_sigframe(ka, env, sizeof(*frame));
-
- if (ka->sa_flags & SA_SIGINFO) {
- tswap_siginfo(&frame->info, info);
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ force_sigsegv(sig);
+ return;
}
+ tswap_siginfo(&frame->info, info);
+
/* Create the ucontext. */
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
target_save_altstack(&frame->uc.tuc_stack, env);
- err |= rt_setup_ucontext(&frame->uc, env);
+ rt_setup_ucontext(&frame->uc, env);
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
- __put_user((abi_ulong)set->sig[i],
- (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
- }
-
- if (err) {
- goto give_sigsegv;
+ __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
}
/* Set up to return from userspace; jump to fixed address sigreturn
@@ -180,19 +176,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
env->regs[R_RA] = (unsigned long) (0x1044);
/* Set up registers for signal handler */
- env->regs[R_SP] = (unsigned long) frame;
- env->regs[4] = (unsigned long) sig;
- env->regs[5] = (unsigned long) &frame->info;
- env->regs[6] = (unsigned long) &frame->uc;
- env->regs[R_EA] = (unsigned long) ka->_sa_handler;
- return;
-
-give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sigsegv(sig);
- return;
+ env->regs[R_SP] = frame_addr;
+ env->regs[4] = sig;
+ env->regs[5] = frame_addr + offsetof(struct target_rt_sigframe, info);
+ env->regs[6] = frame_addr + offsetof(struct target_rt_sigframe, uc);
+ env->regs[R_PC] = ka->_sa_handler;
+
+ unlock_user_struct(frame, frame_addr, 1);
}
long do_sigreturn(CPUNios2State *env)
@@ -215,7 +205,7 @@ long do_rt_sigreturn(CPUNios2State *env)
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
if (rt_restore_ucontext(env, &frame->uc, &rval)) {
goto badframe;
diff --git a/linux-user/nios2/target_prctl.h b/linux-user/nios2/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/nios2/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/nios2/target_signal.h b/linux-user/nios2/target_signal.h
index fe266c4c51..46ca5948ce 100644
--- a/linux-user/nios2/target_signal.h
+++ b/linux-user/nios2/target_signal.h
@@ -1,22 +1,6 @@
#ifndef NIOS2_TARGET_SIGNAL_H
#define NIOS2_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/* sigaltstack controls */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
/* Nios2 uses a fixed address on the kuser page for sigreturn. */
diff --git a/linux-user/nios2/target_syscall.h b/linux-user/nios2/target_syscall.h
index 78006c24d4..561b28d281 100644
--- a/linux-user/nios2/target_syscall.h
+++ b/linux-user/nios2/target_syscall.h
@@ -30,7 +30,6 @@ struct target_pt_regs {
unsigned long orig_r7;
};
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/openrisc/target_prctl.h b/linux-user/openrisc/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/openrisc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/openrisc/target_signal.h b/linux-user/openrisc/target_signal.h
index 077ec3d5e8..5b9d40974a 100644
--- a/linux-user/openrisc/target_signal.h
+++ b/linux-user/openrisc/target_signal.h
@@ -1,29 +1,6 @@
#ifndef OPENRISC_TARGET_SIGNAL_H
#define OPENRISC_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_long ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/* sigaltstack controls */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_SA_NOCLDSTOP 0x00000001
-#define TARGET_SA_NOCLDWAIT 0x00000002
-#define TARGET_SA_SIGINFO 0x00000004
-#define TARGET_SA_ONSTACK 0x08000000
-#define TARGET_SA_RESTART 0x10000000
-#define TARGET_SA_NODEFER 0x40000000
-#define TARGET_SA_RESETHAND 0x80000000
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/openrisc/target_syscall.h b/linux-user/openrisc/target_syscall.h
index ef0d89a551..7fe5b73d3b 100644
--- a/linux-user/openrisc/target_syscall.h
+++ b/linux-user/openrisc/target_syscall.h
@@ -15,7 +15,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "openrisc"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/ppc/target_prctl.h b/linux-user/ppc/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/ppc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h
index 82184ab8f2..5be24e152b 100644
--- a/linux-user/ppc/target_signal.h
+++ b/linux-user/ppc/target_signal.h
@@ -1,24 +1,6 @@
#ifndef PPC_TARGET_SIGNAL_H
#define PPC_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#if !defined(TARGET_PPC64)
diff --git a/linux-user/ppc/target_syscall.h b/linux-user/ppc/target_syscall.h
index b9c4b813d3..8b364697d4 100644
--- a/linux-user/ppc/target_syscall.h
+++ b/linux-user/ppc/target_syscall.h
@@ -71,7 +71,6 @@ struct target_revectored_struct {
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
diff --git a/linux-user/riscv/target_prctl.h b/linux-user/riscv/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/riscv/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/riscv/target_signal.h b/linux-user/riscv/target_signal.h
index 3e36fddc9d..6c0470f0bc 100644
--- a/linux-user/riscv/target_signal.h
+++ b/linux-user/riscv/target_signal.h
@@ -1,18 +1,6 @@
#ifndef RISCV_TARGET_SIGNAL_H
#define RISCV_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/riscv/target_syscall.h b/linux-user/riscv/target_syscall.h
index 9b13161324..7601f10c28 100644
--- a/linux-user/riscv/target_syscall.h
+++ b/linux-user/riscv/target_syscall.h
@@ -51,7 +51,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "4.15.0"
#endif
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/s390x/target_prctl.h b/linux-user/s390x/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/s390x/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h
index 64f5f42201..41e0e34a55 100644
--- a/linux-user/s390x/target_signal.h
+++ b/linux-user/s390x/target_signal.h
@@ -1,21 +1,6 @@
#ifndef S390X_TARGET_SIGNAL_H
#define S390X_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/s390x/target_syscall.h b/linux-user/s390x/target_syscall.h
index 94f84178db..4018988a25 100644
--- a/linux-user/s390x/target_syscall.h
+++ b/linux-user/s390x/target_syscall.h
@@ -27,7 +27,6 @@ struct target_pt_regs {
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/sh4/target_prctl.h b/linux-user/sh4/target_prctl.h
new file mode 100644
index 0000000000..5629ddbf39
--- /dev/null
+++ b/linux-user/sh4/target_prctl.h
@@ -0,0 +1 @@
+#include "../generic/target_prctl_unalign.h"
diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h
index 04069cba66..eee6a1a7cd 100644
--- a/linux-user/sh4/target_signal.h
+++ b/linux-user/sh4/target_signal.h
@@ -1,24 +1,6 @@
#ifndef SH4_TARGET_SIGNAL_H
#define SH4_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/sh4/target_syscall.h b/linux-user/sh4/target_syscall.h
index c1437adafe..148398855d 100644
--- a/linux-user/sh4/target_syscall.h
+++ b/linux-user/sh4/target_syscall.h
@@ -15,7 +15,6 @@ struct target_pt_regs {
#define UNAME_MACHINE "sh4"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 510db73c34..f813b4f18e 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -258,7 +258,6 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
return 0;
}
-#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/
@@ -268,7 +267,6 @@ void set_sigmask(const sigset_t *set)
ts->signal_mask = *set;
}
-#endif
/* sigaltstack management */
@@ -406,7 +404,12 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
case TARGET_SIGCHLD:
tinfo->_sifields._sigchld._pid = info->si_pid;
tinfo->_sifields._sigchld._uid = info->si_uid;
- tinfo->_sifields._sigchld._status = info->si_status;
+ if (si_code == CLD_EXITED)
+ tinfo->_sifields._sigchld._status = info->si_status;
+ else
+ tinfo->_sifields._sigchld._status
+ = host_to_target_signal(info->si_status & 0x7f)
+ | (info->si_status & ~0x7f);
tinfo->_sifields._sigchld._utime = info->si_utime;
tinfo->_sifields._sigchld._stime = info->si_stime;
si_type = QEMU_SI_CHLD;
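For any SIGCHLD cause other than CLD_EXITED, si_status carries a signal number in its low 7 bits, so only that part is remapped to the target numbering while the remaining bits pass through unchanged. A one-line sketch of the split, with a hypothetical mapping callback standing in for host_to_target_signal:

/* Illustrative sketch only: separate the signal part of a wait status
 * (low 7 bits) from its flag bits; map_signal is a hypothetical callback. */
static int remap_child_status(int host_status, int (*map_signal)(int))
{
    return map_signal(host_status & 0x7f) | (host_status & ~0x7f);
}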
diff --git a/linux-user/sparc/target_prctl.h b/linux-user/sparc/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/sparc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/sparc/target_signal.h b/linux-user/sparc/target_signal.h
index e661ddd6ab..87757f0c4e 100644
--- a/linux-user/sparc/target_signal.h
+++ b/linux-user/sparc/target_signal.h
@@ -65,7 +65,6 @@ typedef struct target_sigaltstack {
#define TARGET_ARCH_HAS_KA_RESTORER 1
#define TARGET_MINSIGSTKSZ 4096
-#define TARGET_SIGSTKSZ 16384
#ifdef TARGET_ABI32
#define TARGET_ARCH_HAS_SETUP_FRAME
diff --git a/linux-user/sparc/target_syscall.h b/linux-user/sparc/target_syscall.h
index 087b39d39c..be77e44eb8 100644
--- a/linux-user/sparc/target_syscall.h
+++ b/linux-user/sparc/target_syscall.h
@@ -34,7 +34,6 @@ struct target_pt_regs {
* and copy_thread().
*/
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 4096
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 56a3e17183..ce9d64896c 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -340,6 +340,36 @@ _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
+/* sched_attr is not defined in glibc */
+struct sched_attr {
+ uint32_t size;
+ uint32_t sched_policy;
+ uint64_t sched_flags;
+ int32_t sched_nice;
+ uint32_t sched_priority;
+ uint64_t sched_runtime;
+ uint64_t sched_deadline;
+ uint64_t sched_period;
+ uint32_t sched_util_min;
+ uint32_t sched_util_max;
+};
+#define __NR_sys_sched_getattr __NR_sched_getattr
+_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, size, unsigned int, flags);
+#define __NR_sys_sched_setattr __NR_sched_setattr
+_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, flags);
+#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
+_syscall1(int, sys_sched_getscheduler, pid_t, pid);
+#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
+_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
+ const struct sched_param *, param);
+#define __NR_sys_sched_getparam __NR_sched_getparam
+_syscall2(int, sys_sched_getparam, pid_t, pid,
+ struct sched_param *, param);
+#define __NR_sys_sched_setparam __NR_sched_setparam
+_syscall2(int, sys_sched_setparam, pid_t, pid,
+ const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
@@ -558,6 +588,24 @@ const char *target_strerror(int err)
return strerror(target_to_host_errno(err));
}
+static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
+{
+ int i;
+ uint8_t b;
+ if (usize <= ksize) {
+ return 1;
+ }
+ for (i = ksize; i < usize; i++) {
+ if (get_user_u8(b, addr + i)) {
+ return -TARGET_EFAULT;
+ }
+ if (b != 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
@@ -4867,7 +4915,7 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
* We can't fit all the extents into the fixed size buffer.
* Allocate one that is large enough and use it instead.
*/
- host_ifconf = malloc(outbufsz);
+ host_ifconf = g_try_malloc(outbufsz);
if (!host_ifconf) {
return -TARGET_ENOMEM;
}
@@ -4915,7 +4963,7 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
}
if (free_buf) {
- free(host_ifconf);
+ g_free(host_ifconf);
}
return ret;
@@ -6294,9 +6342,216 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
return ret;
}
#endif /* defined(TARGET_ABI32 */
-
#endif /* defined(TARGET_I386) */
+/*
+ * These constants are generic. Supply any that are missing from the host.
+ */
+#ifndef PR_SET_NAME
+# define PR_SET_NAME 15
+# define PR_GET_NAME 16
+#endif
+#ifndef PR_SET_FP_MODE
+# define PR_SET_FP_MODE 45
+# define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0)
+# define PR_FP_MODE_FRE (1 << 1)
+#endif
+#ifndef PR_SVE_SET_VL
+# define PR_SVE_SET_VL 50
+# define PR_SVE_GET_VL 51
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17)
+#endif
+#ifndef PR_PAC_RESET_KEYS
+# define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1 << 0)
+# define PR_PAC_APIBKEY (1 << 1)
+# define PR_PAC_APDAKEY (1 << 2)
+# define PR_PAC_APDBKEY (1 << 3)
+# define PR_PAC_APGAKEY (1 << 4)
+#endif
+#ifndef PR_SET_TAGGED_ADDR_CTRL
+# define PR_SET_TAGGED_ADDR_CTRL 55
+# define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+#endif
+#ifndef PR_MTE_TCF_SHIFT
+# define PR_MTE_TCF_SHIFT 1
+# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+#endif
+#ifndef PR_SET_IO_FLUSHER
+# define PR_SET_IO_FLUSHER 57
+# define PR_GET_IO_FLUSHER 58
+#endif
+#ifndef PR_SET_SYSCALL_USER_DISPATCH
+# define PR_SET_SYSCALL_USER_DISPATCH 59
+#endif
+
+#include "target_prctl.h"
+
+static abi_long do_prctl_inval0(CPUArchState *env)
+{
+ return -TARGET_EINVAL;
+}
+
+static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
+{
+ return -TARGET_EINVAL;
+}
+
+#ifndef do_prctl_get_fp_mode
+#define do_prctl_get_fp_mode do_prctl_inval0
+#endif
+#ifndef do_prctl_set_fp_mode
+#define do_prctl_set_fp_mode do_prctl_inval1
+#endif
+#ifndef do_prctl_get_vl
+#define do_prctl_get_vl do_prctl_inval0
+#endif
+#ifndef do_prctl_set_vl
+#define do_prctl_set_vl do_prctl_inval1
+#endif
+#ifndef do_prctl_reset_keys
+#define do_prctl_reset_keys do_prctl_inval1
+#endif
+#ifndef do_prctl_set_tagged_addr_ctrl
+#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
+#endif
+#ifndef do_prctl_get_tagged_addr_ctrl
+#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
+#endif
+#ifndef do_prctl_get_unalign
+#define do_prctl_get_unalign do_prctl_inval1
+#endif
+#ifndef do_prctl_set_unalign
+#define do_prctl_set_unalign do_prctl_inval1
+#endif
+
+static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ abi_long ret;
+
+ switch (option) {
+ case PR_GET_PDEATHSIG:
+ {
+ int deathsig;
+ ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
+ arg3, arg4, arg5));
+ if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return ret;
+ }
+ case PR_GET_NAME:
+ {
+ void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
+ if (!name) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
+ arg3, arg4, arg5));
+ unlock_user(name, arg2, 16);
+ return ret;
+ }
+ case PR_SET_NAME:
+ {
+ void *name = lock_user(VERIFY_READ, arg2, 16, 1);
+ if (!name) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
+ arg3, arg4, arg5));
+ unlock_user(name, arg2, 0);
+ return ret;
+ }
+ case PR_GET_FP_MODE:
+ return do_prctl_get_fp_mode(env);
+ case PR_SET_FP_MODE:
+ return do_prctl_set_fp_mode(env, arg2);
+ case PR_SVE_GET_VL:
+ return do_prctl_get_vl(env);
+ case PR_SVE_SET_VL:
+ return do_prctl_set_vl(env, arg2);
+ case PR_PAC_RESET_KEYS:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_reset_keys(env, arg2);
+ case PR_SET_TAGGED_ADDR_CTRL:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_set_tagged_addr_ctrl(env, arg2);
+ case PR_GET_TAGGED_ADDR_CTRL:
+ if (arg2 || arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_get_tagged_addr_ctrl(env);
+
+ case PR_GET_UNALIGN:
+ return do_prctl_get_unalign(env, arg2);
+ case PR_SET_UNALIGN:
+ return do_prctl_set_unalign(env, arg2);
+
+ case PR_GET_DUMPABLE:
+ case PR_SET_DUMPABLE:
+ case PR_GET_KEEPCAPS:
+ case PR_SET_KEEPCAPS:
+ case PR_GET_TIMING:
+ case PR_SET_TIMING:
+ case PR_GET_TIMERSLACK:
+ case PR_SET_TIMERSLACK:
+ case PR_MCE_KILL:
+ case PR_MCE_KILL_GET:
+ case PR_GET_NO_NEW_PRIVS:
+ case PR_SET_NO_NEW_PRIVS:
+ case PR_GET_IO_FLUSHER:
+ case PR_SET_IO_FLUSHER:
+ /* Some prctl options have no pointer arguments and we can pass on. */
+ return get_errno(prctl(option, arg2, arg3, arg4, arg5));
+
+ case PR_GET_CHILD_SUBREAPER:
+ case PR_SET_CHILD_SUBREAPER:
+ case PR_GET_SPECULATION_CTRL:
+ case PR_SET_SPECULATION_CTRL:
+ case PR_GET_TID_ADDRESS:
+ /* TODO */
+ return -TARGET_EINVAL;
+
+ case PR_GET_FPEXC:
+ case PR_SET_FPEXC:
+ /* Was used for SPE on PowerPC. */
+ return -TARGET_EINVAL;
+
+ case PR_GET_ENDIAN:
+ case PR_SET_ENDIAN:
+ case PR_GET_FPEMU:
+ case PR_SET_FPEMU:
+ case PR_SET_MM:
+ case PR_GET_SECCOMP:
+ case PR_SET_SECCOMP:
+ case PR_SET_SYSCALL_USER_DISPATCH:
+ case PR_GET_THP_DISABLE:
+ case PR_SET_THP_DISABLE:
+ case PR_GET_TSC:
+ case PR_SET_TSC:
+ /* Disable to prevent the target disabling stuff we need. */
+ return -TARGET_EINVAL;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
+ option);
+ return -TARGET_EINVAL;
+ }
+}
+
#define NEW_STACK_SIZE 0x40000
@@ -7790,7 +8045,7 @@ static int open_self_maps(void *cpu_env, int fd)
(flags & PAGE_READ) ? 'r' : '-',
(flags & PAGE_WRITE_ORG) ? 'w' : '-',
(flags & PAGE_EXEC) ? 'x' : '-',
- e->is_priv ? 'p' : '-',
+ e->is_priv ? 'p' : 's',
(uint64_t) e->offset, e->dev, e->inode);
if (path) {
dprintf(fd, "%*s%s\n", 73 - count, "", path);
@@ -10550,30 +10805,32 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
case TARGET_NR_sched_setparam:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg2 == 0) {
return -TARGET_EINVAL;
}
- if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
+ if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
return -TARGET_EFAULT;
+ }
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg2, 0);
- return get_errno(sched_setparam(arg1, &schp));
+ return get_errno(sys_sched_setparam(arg1, &schp));
}
case TARGET_NR_sched_getparam:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg2 == 0) {
return -TARGET_EINVAL;
}
- ret = get_errno(sched_getparam(arg1, &schp));
+ ret = get_errno(sys_sched_getparam(arg1, &schp));
if (!is_error(ret)) {
- if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
+ if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
return -TARGET_EFAULT;
+ }
target_schp->sched_priority = tswap32(schp.sched_priority);
unlock_user_struct(target_schp, arg2, 1);
}
@@ -10581,19 +10838,106 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
case TARGET_NR_sched_setscheduler:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg3 == 0) {
return -TARGET_EINVAL;
}
- if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
+ if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
return -TARGET_EFAULT;
+ }
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg3, 0);
- return get_errno(sched_setscheduler(arg1, arg2, &schp));
+ return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
}
case TARGET_NR_sched_getscheduler:
- return get_errno(sched_getscheduler(arg1));
+ return get_errno(sys_sched_getscheduler(arg1));
+ case TARGET_NR_sched_getattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
+ if (arg3 > sizeof(scha)) {
+ arg3 = sizeof(scha);
+ }
+ ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
+ if (!is_error(ret)) {
+ target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ target_scha->size = tswap32(scha.size);
+ target_scha->sched_policy = tswap32(scha.sched_policy);
+ target_scha->sched_flags = tswap64(scha.sched_flags);
+ target_scha->sched_nice = tswap32(scha.sched_nice);
+ target_scha->sched_priority = tswap32(scha.sched_priority);
+ target_scha->sched_runtime = tswap64(scha.sched_runtime);
+ target_scha->sched_deadline = tswap64(scha.sched_deadline);
+ target_scha->sched_period = tswap64(scha.sched_period);
+ if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
+ target_scha->sched_util_min = tswap32(scha.sched_util_min);
+ target_scha->sched_util_max = tswap32(scha.sched_util_max);
+ }
+ unlock_user(target_scha, arg2, arg3);
+ }
+ return ret;
+ }
+ case TARGET_NR_sched_setattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ uint32_t size;
+ int zeroed;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
+ if (get_user_u32(size, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ if (!size) {
+ size = offsetof(struct target_sched_attr, sched_util_min);
+ }
+ if (size < offsetof(struct target_sched_attr, sched_util_min)) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+
+ zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
+ if (zeroed < 0) {
+ return zeroed;
+ } else if (zeroed == 0) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+ if (size > sizeof(struct target_sched_attr)) {
+ size = sizeof(struct target_sched_attr);
+ }
+
+ target_scha = lock_user(VERIFY_READ, arg2, size, 1);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ scha.size = size;
+ scha.sched_policy = tswap32(target_scha->sched_policy);
+ scha.sched_flags = tswap64(target_scha->sched_flags);
+ scha.sched_nice = tswap32(target_scha->sched_nice);
+ scha.sched_priority = tswap32(target_scha->sched_priority);
+ scha.sched_runtime = tswap64(target_scha->sched_runtime);
+ scha.sched_deadline = tswap64(target_scha->sched_deadline);
+ scha.sched_period = tswap64(target_scha->sched_period);
+ if (size > offsetof(struct target_sched_attr, sched_util_min)) {
+ scha.sched_util_min = tswap32(target_scha->sched_util_min);
+ scha.sched_util_max = tswap32(target_scha->sched_util_max);
+ }
+ unlock_user(target_scha, arg2, 0);
+ return get_errno(sys_sched_setattr(arg1, &scha, arg3));
+ }
case TARGET_NR_sched_yield:
return get_errno(sched_yield());
case TARGET_NR_sched_get_priority_max:
@@ -10635,290 +10979,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
#endif
case TARGET_NR_prctl:
- switch (arg1) {
- case PR_GET_PDEATHSIG:
- {
- int deathsig;
- ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
- if (!is_error(ret) && arg2
- && put_user_s32(deathsig, arg2)) {
- return -TARGET_EFAULT;
- }
- return ret;
- }
-#ifdef PR_GET_NAME
- case PR_GET_NAME:
- {
- void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
- if (!name) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(prctl(arg1, (unsigned long)name,
- arg3, arg4, arg5));
- unlock_user(name, arg2, 16);
- return ret;
- }
- case PR_SET_NAME:
- {
- void *name = lock_user(VERIFY_READ, arg2, 16, 1);
- if (!name) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(prctl(arg1, (unsigned long)name,
- arg3, arg4, arg5));
- unlock_user(name, arg2, 0);
- return ret;
- }
-#endif
-#ifdef TARGET_MIPS
- case TARGET_PR_GET_FP_MODE:
- {
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
- ret = 0;
- if (env->CP0_Status & (1 << CP0St_FR)) {
- ret |= TARGET_PR_FP_MODE_FR;
- }
- if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
- ret |= TARGET_PR_FP_MODE_FRE;
- }
- return ret;
- }
- case TARGET_PR_SET_FP_MODE:
- {
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
- bool old_fr = env->CP0_Status & (1 << CP0St_FR);
- bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
- bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
- bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
-
- const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
- TARGET_PR_FP_MODE_FRE;
-
- /* If nothing to change, return right away, successfully. */
- if (old_fr == new_fr && old_fre == new_fre) {
- return 0;
- }
- /* Check the value is valid */
- if (arg2 & ~known_bits) {
- return -TARGET_EOPNOTSUPP;
- }
- /* Setting FRE without FR is not supported. */
- if (new_fre && !new_fr) {
- return -TARGET_EOPNOTSUPP;
- }
- if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
- /* FR1 is not supported */
- return -TARGET_EOPNOTSUPP;
- }
- if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
- && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
- /* cannot set FR=0 */
- return -TARGET_EOPNOTSUPP;
- }
- if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
- /* Cannot set FRE=1 */
- return -TARGET_EOPNOTSUPP;
- }
-
- int i;
- fpr_t *fpr = env->active_fpu.fpr;
- for (i = 0; i < 32 ; i += 2) {
- if (!old_fr && new_fr) {
- fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
- } else if (old_fr && !new_fr) {
- fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
- }
- }
-
- if (new_fr) {
- env->CP0_Status |= (1 << CP0St_FR);
- env->hflags |= MIPS_HFLAG_F64;
- } else {
- env->CP0_Status &= ~(1 << CP0St_FR);
- env->hflags &= ~MIPS_HFLAG_F64;
- }
- if (new_fre) {
- env->CP0_Config5 |= (1 << CP0C5_FRE);
- if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
- env->hflags |= MIPS_HFLAG_FRE;
- }
- } else {
- env->CP0_Config5 &= ~(1 << CP0C5_FRE);
- env->hflags &= ~MIPS_HFLAG_FRE;
- }
-
- return 0;
- }
-#endif /* MIPS */
-#ifdef TARGET_AARCH64
- case TARGET_PR_SVE_SET_VL:
- /*
- * We cannot support either PR_SVE_SET_VL_ONEXEC or
- * PR_SVE_VL_INHERIT. Note the kernel definition
- * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
- * even though the current architectural maximum is VQ=16.
- */
- ret = -TARGET_EINVAL;
- if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
- && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
- uint32_t vq, old_vq;
-
- old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
- vq = MAX(arg2 / 16, 1);
- vq = MIN(vq, cpu->sve_max_vq);
-
- if (vq < old_vq) {
- aarch64_sve_narrow_vq(env, vq);
- }
- env->vfp.zcr_el[1] = vq - 1;
- arm_rebuild_hflags(env);
- ret = vq * 16;
- }
- return ret;
- case TARGET_PR_SVE_GET_VL:
- ret = -TARGET_EINVAL;
- {
- ARMCPU *cpu = env_archcpu(cpu_env);
- if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
- }
- }
- return ret;
- case TARGET_PR_PAC_RESET_KEYS:
- {
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
- TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
- TARGET_PR_PAC_APGAKEY);
- int ret = 0;
- Error *err = NULL;
-
- if (arg2 == 0) {
- arg2 = all;
- } else if (arg2 & ~all) {
- return -TARGET_EINVAL;
- }
- if (arg2 & TARGET_PR_PAC_APIAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apia,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APIBKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apib,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APDAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apda,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APDBKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apdb,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APGAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apga,
- sizeof(ARMPACKey), &err);
- }
- if (ret != 0) {
- /*
- * Some unknown failure in the crypto. The best
- * we can do is log it and fail the syscall.
- * The real syscall cannot fail this way.
- */
- qemu_log_mask(LOG_UNIMP,
- "PR_PAC_RESET_KEYS: Crypto failure: %s",
- error_get_pretty(err));
- error_free(err);
- return -TARGET_EIO;
- }
- return 0;
- }
- }
- return -TARGET_EINVAL;
- case TARGET_PR_SET_TAGGED_ADDR_CTRL:
- {
- abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (cpu_isar_feature(aa64_mte, cpu)) {
- valid_mask |= TARGET_PR_MTE_TCF_MASK;
- valid_mask |= TARGET_PR_MTE_TAG_MASK;
- }
-
- if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
-
- if (cpu_isar_feature(aa64_mte, cpu)) {
- switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
- case TARGET_PR_MTE_TCF_NONE:
- case TARGET_PR_MTE_TCF_SYNC:
- case TARGET_PR_MTE_TCF_ASYNC:
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
- * Note that the syscall values are consistent with hw.
- */
- env->cp15.sctlr_el[1] =
- deposit64(env->cp15.sctlr_el[1], 38, 2,
- arg2 >> TARGET_PR_MTE_TCF_SHIFT);
-
- /*
- * Write PR_MTE_TAG to GCR_EL1[Exclude].
- * Note that the syscall uses an include mask,
- * and hardware uses an exclude mask -- invert.
- */
- env->cp15.gcr_el1 =
- deposit64(env->cp15.gcr_el1, 0, 16,
- ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
- arm_rebuild_hflags(env);
- }
- return 0;
- }
- case TARGET_PR_GET_TAGGED_ADDR_CTRL:
- {
- abi_long ret = 0;
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (arg2 || arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- if (env->tagged_addr_enable) {
- ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
- }
- if (cpu_isar_feature(aa64_mte, cpu)) {
- /* See above. */
- ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
- << TARGET_PR_MTE_TCF_SHIFT);
- ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
- ~env->cp15.gcr_el1);
- }
- return ret;
- }
-#endif /* AARCH64 */
- case PR_GET_SECCOMP:
- case PR_SET_SECCOMP:
- /* Disable seccomp to prevent the target disabling syscalls we
- * need. */
- return -TARGET_EINVAL;
- default:
- /* Most prctl options have no pointer arguments */
- return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
- }
+ return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
break;
#ifdef TARGET_NR_arch_prctl
case TARGET_NR_arch_prctl:
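The AArch64-specific prctl handling removed above (SVE vector length, PAC key reset, tagged-address/MTE control) now sits behind the per-target do_prctl() hook. For orientation, a guest-side sketch of the MTE path that code emulates — our own example, assuming kernel uapi headers new enough to define the PR_MTE_* constants (Linux 5.10+) — might look like this:

/*
 * Guest-side sketch, not part of the patch: exercises the tagged-address/MTE
 * control path emulated above.  Needs <linux/prctl.h> definitions for
 * PR_MTE_TCF_SYNC; on older headers this will not compile.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    /* Enable tagged addresses with synchronous tag-check faults. */
    if (prctl(PR_SET_TAGGED_ADDR_CTRL,
              PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0)) {
        perror("PR_SET_TAGGED_ADDR_CTRL");
        return 1;
    }
    /* Read the control word back; QEMU reconstructs it from SCTLR_EL1/GCR_EL1. */
    printf("tagged_addr_ctrl = 0x%x\n",
           prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
    return 0;
}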
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 0b13975937..cca561f622 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -2133,7 +2133,8 @@ struct target_stat64 {
abi_ulong __unused5;
};
-#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV)
+#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) \
+ || defined(TARGET_RISCV) || defined(TARGET_HEXAGON)
/* These are the asm-generic versions of the stat and stat64 structures */
@@ -2244,31 +2245,6 @@ struct target_stat64 {
uint64_t st_ino;
};
-#elif defined(TARGET_HEXAGON)
-
-struct target_stat {
- unsigned long long st_dev;
- unsigned long long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long long st_rdev;
- target_ulong __pad1;
- long long st_size;
- target_long st_blksize;
- int __pad2;
- long long st_blocks;
-
- target_long target_st_atime;
- target_long target_st_atime_nsec;
- target_long target_st_mtime;
- target_long target_st_mtime_nsec;
- target_long target_st_ctime;
- target_long target_st_ctime_nsec;
- int __unused[2];
-};
-
#else
#error unsupported CPU
#endif
@@ -2914,4 +2890,22 @@ struct target_statx {
/* 0x100 */
};
+/* from kernel's include/linux/sched/types.h */
+struct target_sched_attr {
+ abi_uint size;
+ abi_uint sched_policy;
+ abi_ullong sched_flags;
+ abi_int sched_nice;
+ abi_uint sched_priority;
+ abi_ullong sched_runtime;
+ abi_ullong sched_deadline;
+ abi_ullong sched_period;
+ abi_uint sched_util_min;
+ abi_uint sched_util_max;
+};
+
+struct target_sched_param {
+ abi_int sched_priority;
+};
+
#endif
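target_sched_attr above mirrors the kernel's struct sched_attr, which guests reach through the raw sched_setattr/sched_getattr syscalls (glibc provides no wrapper). A minimal, hypothetical caller — field layout copied from the struct above, names ending in _example are ours, and it assumes the libc headers define SYS_sched_setattr — could be:

/* Sketch only: set SCHED_OTHER with a nice value via sched_setattr(2). */
#define _GNU_SOURCE
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr_example {          /* layout matches target_sched_attr above */
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};

int set_nice_via_sched_setattr(int nice_val)
{
    struct sched_attr_example attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);        /* kernel uses this for size negotiation */
    attr.sched_policy = 0;           /* SCHED_OTHER */
    attr.sched_nice = nice_val;
    return syscall(SYS_sched_setattr, 0 /* this thread */, &attr, 0 /* flags */);
}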
diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h
index f71f372829..a8fdd6933b 100644
--- a/linux-user/user-internals.h
+++ b/linux-user/user-internals.h
@@ -64,7 +64,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8);
extern __thread CPUState *thread_cpu;
-void cpu_loop(CPUArchState *env);
+void QEMU_NORETURN cpu_loop(CPUArchState *env);
const char *target_strerror(int err);
int get_osversion(void);
void init_qemu_uname_release(void);
diff --git a/linux-user/x86_64/target_prctl.h b/linux-user/x86_64/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/x86_64/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h
index 4673c5a886..9d9717406f 100644
--- a/linux-user/x86_64/target_signal.h
+++ b/linux-user/x86_64/target_signal.h
@@ -1,24 +1,6 @@
#ifndef X86_64_TARGET_SIGNAL_H
#define X86_64_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
/* For x86_64, use of SA_RESTORER is mandatory. */
diff --git a/linux-user/x86_64/target_syscall.h b/linux-user/x86_64/target_syscall.h
index 3ecccb72be..fb558345d3 100644
--- a/linux-user/x86_64/target_syscall.h
+++ b/linux-user/x86_64/target_syscall.h
@@ -100,7 +100,6 @@ struct target_msqid64_ds {
#define TARGET_ARCH_SET_FS 0x1002
#define TARGET_ARCH_GET_FS 0x1003
#define TARGET_ARCH_GET_GS 0x1004
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
diff --git a/linux-user/xtensa/target_prctl.h b/linux-user/xtensa/target_prctl.h
new file mode 100644
index 0000000000..eb53b31ad5
--- /dev/null
+++ b/linux-user/xtensa/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h
index 1c7ee73154..e4b1bea5cb 100644
--- a/linux-user/xtensa/target_signal.h
+++ b/linux-user/xtensa/target_signal.h
@@ -1,23 +1,6 @@
#ifndef XTENSA_TARGET_SIGNAL_H
#define XTENSA_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/meson.build b/meson.build
index 53065e96ec..c1b1db1e28 100644
--- a/meson.build
+++ b/meson.build
@@ -2933,7 +2933,7 @@ foreach target : target_dirs
base_dir = 'bsd-user'
target_inc += include_directories('bsd-user/' / targetos)
dir = base_dir / abi
- arch_srcs += files(dir / 'target_arch_cpu.c')
+ arch_srcs += files(dir / 'signal.c', dir / 'target_arch_cpu.c')
endif
target_inc += include_directories(
base_dir,
diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
index ae651e2993..dba8e8655f 100644
--- a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
+++ b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
Binary files differ
diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf b/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
index 3250d89408..a19363e27c 100644
--- a/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
+++ b/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
Binary files differ
diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
index f039884483..f223e56991 100644
--- a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
+++ b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
Binary files differ
diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf b/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
index ef261c98d1..c59573d026 100644
--- a/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
+++ b/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
Binary files differ
diff --git a/python/qemu/aqmp/aqmp_tui.py b/python/qemu/aqmp/aqmp_tui.py
index a2929f771c..f1e926dd75 100644
--- a/python/qemu/aqmp/aqmp_tui.py
+++ b/python/qemu/aqmp/aqmp_tui.py
@@ -35,7 +35,8 @@ from pygments import token as Token
import urwid
import urwid_readline
-from ..qmp import QEMUMonitorProtocol, QMPBadPortError
+from qemu.qmp import QEMUMonitorProtocol, QMPBadPortError
+
from .error import ProtocolError
from .message import DeserializationError, Message, UnexpectedTypeError
from .protocol import ConnectError, Runstate
diff --git a/python/qemu/aqmp/protocol.py b/python/qemu/aqmp/protocol.py
index 5190b33b13..c4fbe35a0e 100644
--- a/python/qemu/aqmp/protocol.py
+++ b/python/qemu/aqmp/protocol.py
@@ -43,8 +43,8 @@ from .util import (
T = TypeVar('T')
+_U = TypeVar('_U')
_TaskFN = Callable[[], Awaitable[None]] # aka ``async def func() -> None``
-_FutureT = TypeVar('_FutureT', bound=Optional['asyncio.Future[Any]'])
class Runstate(Enum):
@@ -591,7 +591,8 @@ class AsyncProtocol(Generic[T]):
"""
Fully reset this object to a clean state and return to `IDLE`.
"""
- def _paranoid_task_erase(task: _FutureT) -> Optional[_FutureT]:
+ def _paranoid_task_erase(task: Optional['asyncio.Future[_U]']
+ ) -> Optional['asyncio.Future[_U]']:
# Help to erase a task, ENSURING it is fully quiesced first.
assert (task is None) or task.done()
return None if (task and task.done()) else task
diff --git a/python/qemu/qmp/qom_common.py b/python/qemu/qmp/qom_common.py
index a59ae1a2a1..2e4c741f77 100644
--- a/python/qemu/qmp/qom_common.py
+++ b/python/qemu/qmp/qom_common.py
@@ -30,10 +30,6 @@ from typing import (
from . import QEMUMonitorProtocol, QMPError
-# The following is needed only for a type alias.
-Subparsers = argparse._SubParsersAction # pylint: disable=protected-access
-
-
class ObjectPropertyInfo:
"""
Represents the return type from e.g. qom-list.
@@ -89,7 +85,7 @@ class QOMCommand:
self.qmp.connect()
@classmethod
- def register(cls, subparsers: Subparsers) -> None:
+ def register(cls, subparsers: Any) -> None:
"""
Register this command with the argument parser.
diff --git a/qapi/machine.json b/qapi/machine.json
index 372535b348..b6a37e17c4 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -1568,3 +1568,15 @@
{ 'command': 'x-query-usb',
'returns': 'HumanReadableText',
'features': [ 'unstable' ] }
+
+##
+# @SmbiosEntryPointType:
+#
+# @32: SMBIOS version 2.1 (32-bit) Entry Point
+#
+# @64: SMBIOS version 3.0 (64-bit) Entry Point
+#
+# Since: 7.0
+##
+{ 'enum': 'SmbiosEntryPointType',
+ 'data': [ '32', '64' ] }
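The new enum selects between the SMBIOS 2.1 (32-bit) and 3.0 (64-bit) entry point formats; it is presumably exposed as a machine property, so on an x86 machine type something along the lines of -machine q35,smbios-entry-point-type=64 would request the 64-bit entry point. The exact property name is not shown in this hunk, so treat that spelling as an assumption.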
diff --git a/qemu-options.hx b/qemu-options.hx
index fd1f8135fb..ec90505d84 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -2065,14 +2065,6 @@ SRST
``-display sdl,grab-mod=rctrl`` instead.
ERST
-DEF("no-quit", 0, QEMU_OPTION_no_quit,
- "-no-quit disable SDL/GTK window close capability (deprecated)\n", QEMU_ARCH_ALL)
-SRST
-``-no-quit``
- Disable window close capability (SDL and GTK only). This option is
- deprecated, please use ``-display ...,window-close=off`` instead.
-ERST
-
DEF("sdl", 0, QEMU_OPTION_sdl,
"-sdl shorthand for -display sdl\n", QEMU_ARCH_ALL)
SRST
diff --git a/roms/opensbi b/roms/opensbi
-Subproject 234ed8e427f4d92903123199f6590d144e0d935
+Subproject 48f91ee9c960f048c4a7d1da4447d31e04931e3
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
index 7de996d536..e9bfeb94d3 100755
--- a/scripts/qemu-binfmt-conf.sh
+++ b/scripts/qemu-binfmt-conf.sh
@@ -340,7 +340,9 @@ PERSISTENT=no
PRESERVE_ARG0=no
QEMU_SUFFIX=""
-options=$(getopt -o ds:Q:S:e:hc:p:g: -l debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,persistent:,preserve-argv0: -- "$@")
+_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\
+persistent:,preserve-argv0:"
+options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@")
eval set -- "$options"
while true ; do
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
index 4864435f39..fc370691e0 100644
--- a/scripts/simplebench/bench-example.py
+++ b/scripts/simplebench/bench-example.py
@@ -25,7 +25,7 @@ from bench_block_job import bench_block_copy, drv_file, drv_nbd
def bench_func(env, case):
""" Handle one "cell" of benchmarking table. """
- return bench_block_copy(env['qemu_binary'], env['cmd'], {}
+ return bench_block_copy(env['qemu_binary'], env['cmd'], {},
case['source'], case['target'])
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
index 071085f840..23bca46b07 100644
--- a/softmmu/cpus.c
+++ b/softmmu/cpus.c
@@ -352,6 +352,10 @@ static void qemu_init_sigbus(void)
{
struct sigaction action;
+ /*
+ * ALERT: when modifying this, take care that SIGBUS forwarding in
+ * os_mem_prealloc() will continue working as expected.
+ */
memset(&action, 0, sizeof(action));
action.sa_flags = SA_SIGINFO;
action.sa_sigaction = sigbus_handler;
diff --git a/softmmu/vl.c b/softmmu/vl.c
index d9e4c619d3..a8cad43691 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -1941,7 +1941,7 @@ static void qemu_create_early_backends(void)
"for SDL, ignoring option");
}
if (dpy.has_window_close && !use_gtk && !use_sdl) {
- error_report("-no-quit is only valid for GTK and SDL, "
+ error_report("window-close is only valid for GTK and SDL, "
"ignoring option");
}
@@ -3301,12 +3301,6 @@ void qemu_init(int argc, char **argv, char **envp)
warn_report("-ctrl-grab is deprecated, please use "
"-display sdl,grab-mod=rctrl instead.");
break;
- case QEMU_OPTION_no_quit:
- dpy.has_window_close = true;
- dpy.window_close = false;
- warn_report("-no-quit is deprecated, please use "
- "-display ...,window-close=off instead.");
- break;
case QEMU_OPTION_sdl:
warn_report("-sdl is deprecated, use -display sdl instead.");
#ifdef CONFIG_SDL
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index afd975c878..e819211503 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -383,6 +383,8 @@ enum {
#define ENV_FLAG_TB_MASK \
(ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN)
+#define TB_FLAG_UNALIGN (1u << 1)
+
static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
{
int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
@@ -470,6 +472,9 @@ static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
*pc = env->pc;
*cs_base = 0;
*pflags = env->flags & ENV_FLAG_TB_MASK;
+#ifdef CONFIG_USER_ONLY
+ *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
+#endif
}
#ifdef CONFIG_USER_ONLY
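TB_FLAG_UNALIGN here is driven by the new prctl_unalign_sigbus CPU state in user-mode emulation. A guest opts in with the long-standing Linux prctl, sketched below as our own example (PR_SET_UNALIGN and PR_UNALIGN_SIGBUS come from <sys/prctl.h>; the control is only meaningful on targets such as Alpha and HPPA that otherwise fix up unaligned accesses):

/* Sketch only: ask for SIGBUS instead of silent unaligned-access fixups. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    unsigned int mode = 0;

    if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0)) {
        perror("PR_SET_UNALIGN");
        return 1;
    }
    if (prctl(PR_GET_UNALIGN, &mode, 0, 0, 0) == 0) {
        printf("unalign mode: %u\n", mode);  /* expect PR_UNALIGN_SIGBUS */
    }
    return 0;
}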
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index a4c3f43e72..ca78a0faed 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -45,7 +45,9 @@ typedef struct DisasContext DisasContext;
struct DisasContext {
DisasContextBase base;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ MemOp unalign;
+#else
uint64_t palbr;
#endif
uint32_t tbflags;
@@ -68,6 +70,12 @@ struct DisasContext {
TCGv sink;
};
+#ifdef CONFIG_USER_ONLY
+#define UNALIGN(C) (C)->unalign
+#else
+#define UNALIGN(C) 0
+#endif
+
/* Target-specific return values from translate_one, indicating the
state of the TB. Note that DISAS_NEXT indicates that we are not
exiting the TB. */
@@ -270,7 +278,7 @@ static inline DisasJumpType gen_invalid(DisasContext *ctx)
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
gen_helper_memory_to_f(dest, tmp32);
tcg_temp_free_i32(tmp32);
}
@@ -278,7 +286,7 @@ static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
TCGv tmp = tcg_temp_new();
- tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
gen_helper_memory_to_g(dest, tmp);
tcg_temp_free(tmp);
}
@@ -286,14 +294,14 @@ static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
gen_helper_memory_to_s(dest, tmp32);
tcg_temp_free_i32(tmp32);
}
static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
@@ -324,6 +332,8 @@ static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
if (clear) {
tcg_gen_andi_i64(addr, addr, ~0x7);
+ } else if (!locked) {
+ op |= UNALIGN(ctx);
}
dest = ctx->ir[ra];
@@ -340,7 +350,7 @@ static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
gen_helper_f_to_memory(tmp32, addr);
- tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
tcg_temp_free_i32(tmp32);
}
@@ -348,7 +358,7 @@ static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
TCGv tmp = tcg_temp_new();
gen_helper_g_to_memory(tmp, src);
- tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
tcg_temp_free(tmp);
}
@@ -356,13 +366,13 @@ static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
gen_helper_s_to_memory(tmp32, src);
- tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
tcg_temp_free_i32(tmp32);
}
static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
- tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
@@ -383,6 +393,8 @@ static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
if (clear) {
tcg_gen_andi_i64(addr, addr, ~0x7);
+ } else {
+ op |= UNALIGN(ctx);
}
src = load_gpr(ctx, ra);
@@ -1487,7 +1499,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x0B:
/* LDQ_U */
- gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
break;
case 0x0C:
/* LDWU */
@@ -1506,7 +1518,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x0F:
/* STQ_U */
- gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
break;
case 0x10:
@@ -2457,7 +2469,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
@@ -2467,7 +2479,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -2496,7 +2508,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2512,7 +2524,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
break;
}
tcg_temp_free(addr);
@@ -2725,7 +2737,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
tcg_temp_free(tmp);
break;
case 0x2:
@@ -2736,7 +2748,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEQ);
+ MMU_PHYS_IDX, MO_LEUQ);
break;
case 0x4:
/* Longword virtual access */
@@ -2826,7 +2838,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x29:
/* LDQ */
- gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
break;
case 0x2A:
/* LDL_L */
@@ -2834,7 +2846,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
break;
case 0x2C:
/* STL */
@@ -2842,7 +2854,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2D:
/* STQ */
- gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
break;
case 0x2E:
/* STL_C */
@@ -2852,7 +2864,7 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEQ);
+ ctx->mem_idx, MO_LEUQ);
break;
case 0x30:
/* BR */
@@ -2942,6 +2954,7 @@ static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
#ifdef CONFIG_USER_ONLY
ctx->ir = cpu_std_ir;
+ ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
ctx->palbr = env->palbr;
ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index 5ae2ecb0f3..d6a6fd73d9 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -513,8 +513,8 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
uint64_t o0, o1;
bool success;
int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
- MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
+ MemOpIdx oi0 = make_memop_idx(MO_LEUQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi1 = make_memop_idx(MO_LEUQ, mem_idx);
o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
@@ -565,8 +565,8 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
uint64_t o0, o1;
bool success;
int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
- MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
+ MemOpIdx oi0 = make_memop_idx(MO_BEUQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi1 = make_memop_idx(MO_BEUQ, mem_idx);
o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
diff --git a/target/arm/helper.c b/target/arm/helper.c
index db837d53bd..cfca0f5ba6 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6964,18 +6964,42 @@ static const ARMCPRegInfo tlbios_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
.access = PL2_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
.access = PL2_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
.access = PL2_W, .type = ARM_CP_NO_RAW,
@@ -6996,6 +7020,14 @@ static const ARMCPRegInfo tlbios_reginfo[] = {
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
.access = PL3_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
REGINFO_SENTINEL
};
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index 17af8dc95a..5be4b9b834 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -117,13 +117,13 @@ void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_ld_i64(s, val, a32, index, MO_Q);
+ gen_aa32_ld_i64(s, val, a32, index, MO_UQ);
}
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_st_i64(s, val, a32, index, MO_Q);
+ gen_aa32_st_i64(s, val, a32, index, MO_UQ);
}
DO_GEN_LD(8u, MO_UB)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 130a9ff8d5..5a1df25f91 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -973,7 +973,7 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
- mop = s->be_data | MO_Q;
+ mop = s->be_data | MO_UQ;
tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
mop | (s->align_mem ? MO_ALIGN_16 : 0));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
@@ -1007,7 +1007,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
tmphi = tcg_temp_new_i64();
tcg_hiaddr = tcg_temp_new_i64();
- mop = s->be_data | MO_Q;
+ mop = s->be_data | MO_UQ;
tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
mop | (s->align_mem ? MO_ALIGN_16 : 0));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
@@ -4099,10 +4099,10 @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;
tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
- MO_Q | MO_ALIGN_16);
+ MO_UQ | MO_ALIGN_16);
for (i = 8; i < n; i += 8) {
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
- tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q);
+ tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
}
tcg_temp_free_i64(tcg_zero);
}
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
index dd43de558e..3854dd3516 100644
--- a/target/arm/translate-neon.c
+++ b/target/arm/translate-neon.c
@@ -73,7 +73,7 @@ static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
case MO_UL:
tcg_gen_ld32u_i64(var, cpu_env, offset);
break;
- case MO_Q:
+ case MO_UQ:
tcg_gen_ld_i64(var, cpu_env, offset);
break;
default:
@@ -1830,7 +1830,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
return false;
}
- if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) {
+ if ((a->vd & 1) || (src1_mop == MO_UQ && (a->vn & 1))) {
return false;
}
@@ -1910,7 +1910,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
}; \
int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
- SRC1WIDE ? MO_Q : narrow_mop, \
+ SRC1WIDE ? MO_UQ : narrow_mop, \
narrow_mop); \
}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 76b5fe9f31..33ca1bcfac 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -5087,7 +5087,7 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
- tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_st_i64(t0, cpu_env, vofs + i);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
@@ -5104,7 +5104,7 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
gen_set_label(loop);
t0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
tp = tcg_temp_new_ptr();
@@ -5177,7 +5177,7 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
tcg_gen_ld_i64(t0, cpu_env, vofs + i);
- tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
tcg_temp_free_i64(t0);
@@ -5199,7 +5199,7 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
tcg_gen_addi_ptr(i, i, 8);
tcg_temp_free_ptr(tp);
- tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
tcg_temp_free_i64(t0);
@@ -5283,7 +5283,7 @@ static const MemOp dtype_mop[16] = {
MO_UB, MO_UB, MO_UB, MO_UB,
MO_SL, MO_UW, MO_UW, MO_UW,
MO_SW, MO_SW, MO_UL, MO_UL,
- MO_SB, MO_SB, MO_SB, MO_Q
+ MO_SB, MO_SB, MO_SB, MO_UQ
};
#define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index 59bcaec5be..17f796e32a 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -1170,11 +1170,11 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i64();
if (a->l) {
- gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
vfp_store_reg64(tmp, a->vd);
} else {
vfp_load_reg64(tmp, a->vd);
- gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
}
tcg_temp_free_i64(tmp);
tcg_temp_free_i32(addr);
@@ -1322,12 +1322,12 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
for (i = 0; i < n; i++) {
if (a->l) {
/* load */
- gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
vfp_store_reg64(tmp, a->vd + i);
} else {
/* store */
vfp_load_reg64(tmp, a->vd + i);
- gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
}
tcg_gen_addi_i32(addr, addr, offset);
}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 0a3840d227..bf2196b9e2 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -1217,7 +1217,7 @@ void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
case MO_UL:
tcg_gen_ld32u_i64(dest, cpu_env, off);
break;
- case MO_Q:
+ case MO_UQ:
tcg_gen_ld_i64(dest, cpu_env, off);
break;
default:
diff --git a/target/cris/translate.c b/target/cris/translate.c
index 59325b388a..3656cd6db1 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -1047,7 +1047,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
cris_store_direct_jmp(dc);
}
- tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEUQ);
}
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 294fd7297f..45fd338b02 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -259,12 +259,14 @@ static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
return hppa_form_gva_psw(env->psw, spc, off);
}
-/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
+/*
+ * Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
* TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
* same value.
*/
#define TB_FLAG_SR_SAME PSW_I
#define TB_FLAG_PRIV_SHIFT 8
+#define TB_FLAG_UNALIGN 0x400
static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
target_ulong *cs_base,
@@ -279,6 +281,7 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
#ifdef CONFIG_USER_ONLY
*pc = env->iaoq_f & -4;
*cs_base = env->iaoq_b & -4;
+ flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
/* ??? E, T, H, L, B, P bits need to be here, when implemented. */
flags |= env->psw & (PSW_W | PSW_C | PSW_D);
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 952027a28e..c6195590f8 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -274,8 +274,18 @@ typedef struct DisasContext {
int mmu_idx;
int privilege;
bool psw_n_nonzero;
+
+#ifdef CONFIG_USER_ONLY
+ MemOp unalign;
+#endif
} DisasContext;
+#ifdef CONFIG_USER_ONLY
+#define UNALIGN(C) (C)->unalign
+#else
+#define UNALIGN(C) 0
+#endif
+
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
@@ -1475,7 +1485,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
@@ -1493,7 +1503,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
@@ -1511,7 +1521,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
@@ -1529,7 +1539,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
@@ -1599,7 +1609,7 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
save_frd(rt, tmp);
tcg_temp_free_i64(tmp);
@@ -1655,7 +1665,7 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
tcg_temp_free_i64(tmp);
return nullify_end(ctx);
@@ -4107,6 +4117,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->mmu_idx = MMU_USER_IDX;
ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
+ ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
index a207e624cb..e3cdafd2d4 100644
--- a/target/i386/tcg/mem_helper.c
+++ b/target/i386/tcg/mem_helper.c
@@ -67,7 +67,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
uintptr_t ra = GETPC();
int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ, mem_idx);
oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
}
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 05f9336c9b..77878cd832 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2719,23 +2719,23 @@ static void gen_jmp(DisasContext *s, target_ulong eip)
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
}
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
int mem_index = s->mem_index;
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}
@@ -2743,10 +2743,10 @@ static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
int mem_index = s->mem_index;
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
@@ -4255,7 +4255,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_mov_i64(cpu_regs[rm], s->tmp1_i64);
} else {
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
}
#else
goto illegal_op;
@@ -4328,7 +4328,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm);
} else {
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
}
tcg_gen_st_i64(s->tmp1_i64, cpu_env,
offsetof(CPUX86State,
@@ -5948,7 +5948,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 2:
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
break;
case 3:
@@ -5987,7 +5987,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 2:
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
break;
case 3:
@@ -6009,7 +6009,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 2:
gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
break;
case 3:
default:
@@ -6035,7 +6035,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 2:
gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
break;
case 3:
default:
@@ -6104,13 +6104,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fpop(cpu_env);
break;
default:
@@ -7932,10 +7932,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->A0, s->A0, 8);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
} else {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
s->mem_index, MO_LEUL);
@@ -8039,10 +8039,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->A0, s->A0, 8);
tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
} else {
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
s->mem_index, MO_LEUL);
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index cfbc987ba6..acbd473515 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -415,7 +415,10 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
oldsr = sr;
env->aregs[7] = sp;
cpu_m68k_set_sr(env, sr &= ~SR_M);
- sp = env->aregs[7] & ~1;
+ sp = env->aregs[7];
+ if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) {
+ sp &= ~1;
+ }
do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
} else {
do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
@@ -774,7 +777,7 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64)
int mmu_idx = cpu_mmu_index(env, 0);
- MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_BEUQ, mmu_idx);
#endif
if (parallel) {
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
index 0da4c802a3..0760941431 100644
--- a/target/mips/tcg/micromips_translate.c.inc
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -1001,20 +1001,20 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
gen_reserved_instruction(ctx);
return;
}
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t1, rd);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t1, rd + 1);
break;
case SDP:
gen_load_gpr(t1, rd);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
gen_load_gpr(t1, rd + 1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
break;
#endif
}
@@ -2578,7 +2578,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
case SCD:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
- gen_st_cond(ctx, rt, rs, offset, MO_TEQ, false);
+ gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false);
break;
#endif
case LD_EVA:
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index 47db35d7dd..1c2264417c 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -2031,7 +2031,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
gen_store_gpr(t0, rt);
break;
case OPC_LD:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -2053,7 +2053,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
tcg_gen_shl_tl(t0, t0, t1);
t2 = tcg_const_tl(-1);
tcg_gen_shl_tl(t2, t2, t1);
@@ -2077,7 +2077,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0xfffffffffffffffeull);
@@ -2093,7 +2093,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
t1 = tcg_const_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
tcg_temp_free(t1);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
gen_store_gpr(t0, rt);
break;
#endif
@@ -2224,7 +2224,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_SD:
- tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
break;
case OPC_SDL:
@@ -2334,7 +2334,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
case OPC_LDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, fp0, ft);
tcg_temp_free_i64(fp0);
@@ -2344,7 +2344,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, ft);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free_i64(fp0);
}
@@ -3092,7 +3092,7 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
check_mips_64(ctx);
offset = sextract32(ctx->opcode << 3, 0, 21);
addr = addr_add(ctx, (pc & ~0x7), offset);
- gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEQ);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ);
break;
#endif
default:
@@ -4344,10 +4344,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
case OPC_GSLQ:
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rt);
gen_store_gpr(t0, lsq_rt1);
@@ -4357,10 +4357,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
check_cp1_enabled(ctx);
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t1, rt);
gen_store_fpr64(ctx, t0, lsq_rt1);
@@ -4370,11 +4370,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_gpr(t1, lsq_rt1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
@@ -4383,11 +4383,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_fpr64(ctx, t1, lsq_rt1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
@@ -4467,7 +4467,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_shl_tl(t0, t0, t1);
t2 = tcg_const_tl(-1);
tcg_gen_shl_tl(t2, t2, t1);
@@ -4489,7 +4489,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0xfffffffffffffffeull);
@@ -4642,7 +4642,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
@@ -4664,7 +4664,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t0, rt);
break;
@@ -4693,7 +4693,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
case OPC_GSSDX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
@@ -4709,7 +4709,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
case OPC_GSSDXC1:
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
@@ -11330,7 +11330,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
check_cp1_registers(ctx, fd);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -11341,7 +11341,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -11361,7 +11361,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_temp_free_i64(fp0);
}
break;
@@ -11371,7 +11371,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_temp_free_i64(fp0);
}
break;
@@ -12187,7 +12187,7 @@ static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc,
break;
#if defined(TARGET_MIPS64)
case OPC_LDX:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t0, rd);
break;
#endif
@@ -14403,7 +14403,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
#endif
#if defined(TARGET_MIPS64)
case R6_OPC_SCD:
- gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false);
+ gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
break;
case R6_OPC_LLD:
gen_ld(ctx, op1, rt, rs, imm);
@@ -15843,7 +15843,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
check_insn_opc_user_only(ctx, INSN_R5900);
}
check_mips_64(ctx);
- gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false);
+ gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
break;
case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
if (ctx->insn_flags & ISA_MIPS_R6) {
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
index 6d51fe17c1..4e479c2d10 100644
--- a/target/mips/tcg/tx79_translate.c
+++ b/target/mips/tcg/tx79_translate.c
@@ -355,12 +355,12 @@ static bool trans_LQ(DisasContext *ctx, arg_i *a)
tcg_gen_andi_tl(addr, addr, ~0xf);
/* Lower half */
- tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t0, a->rt);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
- tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
gen_store_gpr_hi(t0, a->rt);
tcg_temp_free(t0);
@@ -383,12 +383,12 @@ static bool trans_SQ(DisasContext *ctx, arg_i *a)
/* Lower half */
gen_load_gpr(t0, a->rt);
- tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
gen_load_gpr_hi(t0, a->rt);
- tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
tcg_temp_free(addr);
tcg_temp_free(t0);
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index 1a69ed7a49..d2ba0c5bbd 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -160,9 +160,9 @@ struct CPUNios2State {
#if !defined(CONFIG_USER_ONLY)
Nios2MMU mmu;
-
uint32_t irq_pending;
#endif
+ int error_code;
};
/**
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index 08d7ac5398..f9abc2fdd2 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -636,6 +636,21 @@ static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
tcg_temp_free(t0);
}
+static void trap(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * The imm5 field is not stored anywhere on real hw; the kernel
+ * has to load the insn and extract the field. But we can make
+ * things easier for cpu_loop if we pop this into env->error_code.
+ */
+ R_TYPE(instr, code);
+ tcg_gen_st_i32(tcg_constant_i32(instr.imm5), cpu_env,
+ offsetof(CPUNios2State, error_code));
+#endif
+ t_gen_helper_raise_exception(dc, EXCP_TRAP);
+}
+
static const Nios2Instruction r_type_instructions[] = {
INSTRUCTION_ILLEGAL(),
INSTRUCTION(eret), /* eret */
@@ -682,7 +697,7 @@ static const Nios2Instruction r_type_instructions[] = {
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
- INSTRUCTION_FLG(gen_excp, EXCP_TRAP), /* trap */
+ INSTRUCTION(trap), /* trap */
INSTRUCTION(wrctl), /* wrctl */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LTU), /* cmpltu */
@@ -780,15 +795,6 @@ static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
dc->base.pc_next = pc + 4;
/* Decode an instruction */
-
-#if defined(CONFIG_USER_ONLY)
- /* FIXME: Is this needed ? */
- if (pc >= 0x1000 && pc < 0x2000) {
- t_gen_helper_raise_exception(dc, 0xaa);
- return;
- }
-#endif
-
code = cpu_ldl_code(env, pc);
op = get_opcode(code);
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index cb8ab4d676..40232201bb 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3277,10 +3277,10 @@ GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
-GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_UQ))
#if defined(TARGET_PPC64)
-GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
#define GEN_QEMU_STORE_TL(stop, op) \
@@ -3311,10 +3311,10 @@ static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
-GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))
#if defined(TARGET_PPC64)
-GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
@@ -3351,7 +3351,7 @@ GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
-GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif
#if defined(TARGET_PPC64)
@@ -3397,7 +3397,7 @@ GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
-GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04)
+GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif
#if defined(TARGET_PPC64)
@@ -3807,7 +3807,7 @@ static void gen_lwat(DisasContext *ctx)
#ifdef TARGET_PPC64
static void gen_ldat(DisasContext *ctx)
{
- gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
@@ -3890,7 +3890,7 @@ static void gen_stwat(DisasContext *ctx)
#ifdef TARGET_PPC64
static void gen_stdat(DisasContext *ctx)
{
- gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
+ gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
@@ -3942,9 +3942,9 @@ STCX(stwcx_, DEF_MEMOP(MO_UL))
#if defined(TARGET_PPC64)
/* ldarx */
-LARX(ldarx, DEF_MEMOP(MO_Q))
+LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
-STCX(stdcx_, DEF_MEMOP(MO_Q))
+STCX(stdcx_, DEF_MEMOP(MO_UQ))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
@@ -3988,15 +3988,15 @@ static void gen_lqarx(DisasContext *ctx)
return;
}
} else if (ctx->le_mode) {
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16);
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ);
} else {
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16);
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ);
}
tcg_temp_free(EA);
@@ -8018,7 +8018,7 @@ GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
-GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif
#undef GEN_STX_E
@@ -8044,7 +8044,7 @@ GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
-GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04)
+GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
#endif
#undef GEN_CRLOGIC
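
The MO_Q/MO_LEQ/MO_BEQ/MO_TEQ occurrences above are renamed mechanically to MO_UQ/MO_LEUQ/MO_BEUQ/MO_TEUQ: spelling the 64-bit operand explicitly as unsigned, in line with the smaller sizes, leaves room for a signed 64-bit MemOp. A simplified sketch of how such size/sign values compose; the real flags live in include/exec/memop.h and also encode endianness and alignment, so the names and values here are illustrative only:

    /* Illustrative size/sign composition, not the actual QEMU MemOp values. */
    enum {
        SK_MO_8    = 0,
        SK_MO_16   = 1,
        SK_MO_32   = 2,
        SK_MO_64   = 3,
        SK_MO_SIGN = 1 << 2,

        SK_MO_UQ = SK_MO_64,              /* unsigned 64-bit, formerly "MO_Q" */
        SK_MO_SQ = SK_MO_SIGN | SK_MO_64, /* signed 64-bit, now expressible   */
    };
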
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index 7fecff4579..1aab32be03 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -137,7 +137,7 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
ctx->base.is_jmp = DISAS_NORETURN;
}
} else {
- mop = DEF_MEMOP(MO_Q);
+ mop = DEF_MEMOP(MO_UQ);
if (store) {
tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
} else {
@@ -205,11 +205,11 @@ TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)
/* Load Doubleword */
-TRANS64(LD, do_ldst_D, false, false, MO_Q)
-TRANS64(LDX, do_ldst_X, false, false, MO_Q)
-TRANS64(LDU, do_ldst_D, true, false, MO_Q)
-TRANS64(LDUX, do_ldst_X, true, false, MO_Q)
-TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q)
+TRANS64(LD, do_ldst_D, false, false, MO_UQ)
+TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
+TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
+TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
+TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)
/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
@@ -237,11 +237,11 @@ TRANS(STWUX, do_ldst_X, true, true, MO_UL)
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)
/* Store Doubleword */
-TRANS64(STD, do_ldst_D, false, true, MO_Q)
-TRANS64(STDX, do_ldst_X, false, true, MO_Q)
-TRANS64(STDU, do_ldst_D, true, true, MO_Q)
-TRANS64(STDUX, do_ldst_X, true, true, MO_Q)
-TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q)
+TRANS64(STD, do_ldst_D, false, true, MO_UQ)
+TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
+TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
+TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
+TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)
/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
index 2baae5988f..c96769742e 100644
--- a/target/ppc/translate/fp-impl.c.inc
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -863,7 +863,7 @@ static void gen_lfdepx(DisasContext *ctx)
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q));
+ tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
set_fpr(rD(ctx->opcode), t0);
tcg_temp_free(EA);
tcg_temp_free_i64(t0);
@@ -1021,7 +1021,7 @@ static void gen_stfdepx(DisasContext *ctx)
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
get_fpr(t0, rD(ctx->opcode));
- tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q));
+ tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
tcg_temp_free(EA);
tcg_temp_free_i64(t0);
}
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index c08185e857..c636e38164 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -85,19 +85,19 @@ static void gen_lxvw4x(DisasContext *ctx)
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_shri_i64(t1, t0, 32);
tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_shri_i64(t1, t0, 32);
tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
} else {
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
}
set_cpu_vsr(xT(ctx->opcode), xth, true);
set_cpu_vsr(xT(ctx->opcode), xtl, false);
@@ -152,8 +152,8 @@ static void gen_lxvdsx(DisasContext *ctx)
gen_addr_reg_index(ctx, EA);
data = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
- tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
+ tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
+ tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
tcg_temp_free(EA);
tcg_temp_free_i64(data);
@@ -217,9 +217,9 @@ static void gen_lxvh8x(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
if (ctx->le_mode) {
gen_bswap16x8(xth, xtl, xth, xtl);
}
@@ -245,9 +245,9 @@ static void gen_lxvb16x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
set_cpu_vsr(xT(ctx->opcode), xth, true);
set_cpu_vsr(xT(ctx->opcode), xtl, false);
tcg_temp_free(EA);
@@ -382,17 +382,17 @@ static void gen_stxvw4x(DisasContext *ctx)
tcg_gen_shri_i64(t0, xsh, 32);
tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
- tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_shri_i64(t0, xsl, 32);
tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
- tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
} else {
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
tcg_temp_free(EA);
tcg_temp_free_i64(xsh);
@@ -421,15 +421,15 @@ static void gen_stxvh8x(DisasContext *ctx)
TCGv_i64 outl = tcg_temp_new_i64();
gen_bswap16x8(outh, outl, xsh, xsl);
- tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
tcg_temp_free_i64(outh);
tcg_temp_free_i64(outl);
} else {
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
tcg_temp_free(EA);
tcg_temp_free_i64(xsh);
@@ -453,9 +453,9 @@ static void gen_stxvb16x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
tcg_temp_free(EA);
tcg_temp_free_i64(xsh);
tcg_temp_free_i64(xsl);
@@ -2020,7 +2020,7 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
xt = tcg_temp_new_i64();
- mop = DEF_MEMOP(MO_Q);
+ mop = DEF_MEMOP(MO_UQ);
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, ra, displ);
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 6ef3314bce..9bc25d3055 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -42,6 +42,15 @@ const char * const riscv_int_regnames[] = {
"x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
+const char * const riscv_int_regnamesh[] = {
+ "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
+ "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
+ "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
+ "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
+ "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
+ "x30h/t5h", "x31h/t6h"
+};
+
const char * const riscv_fpr_regnames[] = {
"f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
"f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
@@ -169,6 +178,19 @@ static void rv64_sifive_e_cpu_init(Object *obj)
set_priv_version(env, PRIV_VERSION_1_10_0);
qdev_prop_set_bit(DEVICE(obj), "mmu", false);
}
+
+static void rv128_base_cpu_init(Object *obj)
+{
+ if (qemu_tcg_mttcg_enabled()) {
+ /* Missing 128-bit aligned atomics */
+ error_report("128-bit RISC-V currently does not work with Multi "
+ "Threaded TCG. Please use: -accel tcg,thread=single");
+ exit(EXIT_FAILURE);
+ }
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ /* The extensions are set later, in the realize function */
+ set_misa(env, MXL_RV128, 0);
+}
#else
static void rv32_base_cpu_init(Object *obj)
{
@@ -393,6 +415,9 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
case MXL_RV64:
info->print_insn = print_insn_riscv64;
break;
+ case MXL_RV128:
+ info->print_insn = print_insn_riscv128;
+ break;
default:
g_assert_not_reached();
}
@@ -455,6 +480,8 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
#ifdef TARGET_RISCV64
case MXL_RV64:
break;
+ case MXL_RV128:
+ break;
#endif
case MXL_RV32:
break;
@@ -627,6 +654,7 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
+ DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
@@ -640,12 +668,12 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
- /* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
- DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
+
+ /* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
/* ePMP 0.9.3 */
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
@@ -663,6 +691,7 @@ static gchar *riscv_gdb_arch_name(CPUState *cs)
case MXL_RV32:
return g_strdup("riscv:rv32");
case MXL_RV64:
+ case MXL_RV128:
return g_strdup("riscv:rv64");
default:
g_assert_not_reached();
@@ -817,6 +846,7 @@ static const TypeInfo riscv_cpu_type_infos[] = {
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index dc10f27093..4d63086765 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -25,6 +25,7 @@
#include "exec/cpu-defs.h"
#include "fpu/softfloat-types.h"
#include "qom/object.h"
+#include "qemu/int128.h"
#include "cpu_bits.h"
#define TCG_GUEST_DEFAULT_MO 0
@@ -38,6 +39,7 @@
#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
+#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
@@ -112,6 +114,7 @@ FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
struct CPURISCVState {
target_ulong gpr[32];
+ target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
uint64_t fpr[32]; /* assume both F and D extensions */
/* vector coprocessor state. */
@@ -129,6 +132,8 @@ struct CPURISCVState {
target_ulong frm;
target_ulong badaddr;
+ uint32_t bins;
+
target_ulong guest_phys_fault_addr;
target_ulong priv_ver;
@@ -141,6 +146,9 @@ struct CPURISCVState {
uint32_t misa_ext; /* current extensions */
uint32_t misa_ext_mask; /* max ext for this cpu */
+ /* Upper part of the return value for the 128-bit helpers */
+ target_ulong retxh;
+
uint32_t features;
#ifdef CONFIG_USER_ONLY
@@ -190,6 +198,10 @@ struct CPURISCVState {
target_ulong hgatp;
uint64_t htimedelta;
+ /* Upper 64-bits of 128-bit CSRs */
+ uint64_t mscratchh;
+ uint64_t sscratchh;
+
/* Virtual CSRs */
/*
* For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
@@ -344,6 +356,7 @@ static inline bool riscv_feature(CPURISCVState *env, int feature)
#include "cpu_user.h"
extern const char * const riscv_int_regnames[];
+extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
@@ -490,12 +503,23 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
target_ulong new_value,
target_ulong write_mask);
+RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
+ Int128 *ret_value,
+ Int128 new_value, Int128 write_mask);
+
+typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
+ Int128 *ret_value);
+typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
+ Int128 new_value);
+
typedef struct {
const char *name;
riscv_csr_predicate_fn predicate;
riscv_csr_read_fn read;
riscv_csr_write_fn write;
riscv_csr_op_fn op;
+ riscv_csr_read128_fn read128;
+ riscv_csr_write128_fn write128;
} riscv_csr_operations;
/* CSR function table constants */
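
The gpr[]/gprh[] split above keeps each 128-bit register as two 64-bit halves. A self-contained sketch of how such a pair maps to and from a full 128-bit value; QEMU itself uses the Int128 helpers from qemu/int128.h, while this sketch assumes a host compiler with native __int128 and uses made-up helper names:

    #include <stdint.h>

    /* Reassemble a 128-bit value from its low/high 64-bit halves. */
    static inline unsigned __int128 pair_to_u128(uint64_t lo, uint64_t hi)
    {
        return ((unsigned __int128)hi << 64) | lo;
    }

    /* Split a 128-bit value back into its low/high 64-bit halves. */
    static inline void u128_to_pair(unsigned __int128 val, uint64_t *lo, uint64_t *hi)
    {
        *lo = (uint64_t)val;
        *hi = (uint64_t)(val >> 64);
    }
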
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 1e31f4d35f..5a6d49aa64 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -401,6 +401,7 @@
#define MSTATUS32_SD 0x80000000
#define MSTATUS64_SD 0x8000000000000000ULL
+#define MSTATUSH128_SD 0x8000000000000000ULL
#define MISA32_MXL 0xC0000000
#define MISA64_MXL 0xC000000000000000ULL
@@ -423,6 +424,8 @@ typedef enum {
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
+#define SSTATUS64_UXL 0x0000000300000000ULL
+
#define SSTATUS32_SD 0x80000000
#define SSTATUS64_SD 0x8000000000000000ULL
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 10f3baba53..434a83e66a 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -998,6 +998,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+ bool write_gva = false;
uint64_t s;
/* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
@@ -1006,7 +1007,6 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
- bool write_tval = false;
target_ulong tval = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
@@ -1035,9 +1035,12 @@ void riscv_cpu_do_interrupt(CPUState *cs)
case RISCV_EXCP_INST_PAGE_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
- write_tval = true;
+ write_gva = true;
tval = env->badaddr;
break;
+ case RISCV_EXCP_ILLEGAL_INST:
+ tval = env->bins;
+ break;
default:
break;
}
@@ -1072,18 +1075,6 @@ void riscv_cpu_do_interrupt(CPUState *cs)
if (riscv_has_ext(env, RVH)) {
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
- if (env->two_stage_lookup && write_tval) {
- /*
- * If we are writing a guest virtual address to stval, set
- * this to 1. If we are trapping to VS we will set this to 0
- * later.
- */
- env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
- } else {
- /* For other HS-mode traps, we set this to 0. */
- env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
- }
-
if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
/* Trap to VS mode */
/*
@@ -1094,7 +1085,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
cause == IRQ_VS_EXT) {
cause = cause - 1;
}
- env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
+ write_gva = false;
} else if (riscv_cpu_virt_enabled(env)) {
/* Trap into HS mode, from virt */
riscv_cpu_swap_hypervisor_regs(env);
@@ -1103,6 +1094,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
riscv_cpu_virt_enabled(env));
+
htval = env->guest_phys_fault_addr;
riscv_cpu_set_virt_enabled(env, 0);
@@ -1110,7 +1102,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trap into HS mode */
env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
htval = env->guest_phys_fault_addr;
+ write_gva = false;
}
+ env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
}
s = env->mstatus;
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 146447eac5..adb3d4381d 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -481,7 +481,7 @@ static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
(1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
- SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
+ SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS | (target_ulong)SSTATUS64_UXL;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
@@ -527,6 +527,8 @@ static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
return status | MSTATUS32_SD;
case MXL_RV64:
return status | MSTATUS64_SD;
+ case MXL_RV128:
+ return MSTATUSH128_SD;
default:
g_assert_not_reached();
}
@@ -576,10 +578,11 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mstatus = (mstatus & ~mask) | (val & mask);
- if (riscv_cpu_mxl(env) == MXL_RV64) {
+ RISCVMXL xl = riscv_cpu_mxl(env);
+ if (xl > MXL_RV32) {
/* SXL and UXL fields are for now read only */
- mstatus = set_field(mstatus, MSTATUS64_SXL, MXL_RV64);
- mstatus = set_field(mstatus, MSTATUS64_UXL, MXL_RV64);
+ mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
+ mstatus = set_field(mstatus, MSTATUS64_UXL, xl);
}
env->mstatus = mstatus;
@@ -608,6 +611,20 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_misa(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -765,6 +782,21 @@ static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
}
/* Machine Trap Handling */
+static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->mscratch, env->mscratchh);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
+ Int128 val)
+{
+ env->mscratch = int128_getlo(val);
+ env->mscratchh = int128_gethi(val);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_mscratch(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -844,6 +876,16 @@ static RISCVException rmw_mip(CPURISCVState *env, int csrno,
}
/* Supervisor Trap Setup */
+static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ uint64_t mask = sstatus_v1_10_mask;
+ uint64_t sstatus = env->mstatus & mask;
+
+ *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_sstatus(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -937,6 +979,21 @@ static RISCVException write_scounteren(CPURISCVState *env, int csrno,
}
/* Supervisor Trap Handling */
+static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->sscratch, env->sscratchh);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
+ Int128 val)
+{
+ env->sscratch = int128_getlo(val);
+ env->sscratchh = int128_gethi(val);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_sscratch(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -1737,16 +1794,13 @@ static RISCVException write_upmbase(CPURISCVState *env, int csrno,
* csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
*/
-RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask)
+static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
+ int csrno,
+ bool write_mask,
+ RISCVCPU *cpu)
{
- RISCVException ret;
- target_ulong old_value;
- RISCVCPU *cpu = env_archcpu(env);
- int read_only = get_field(csrno, 0xC00) == 3;
-
/* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
+ int read_only = get_field(csrno, 0xC00) == 3;
#if !defined(CONFIG_USER_ONLY)
int effective_priv = env->priv;
@@ -1778,10 +1832,17 @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
if (!csr_ops[csrno].predicate) {
return RISCV_EXCP_ILLEGAL_INST;
}
- ret = csr_ops[csrno].predicate(env, csrno);
- if (ret != RISCV_EXCP_NONE) {
- return ret;
- }
+
+ return csr_ops[csrno].predicate(env, csrno);
+}
+
+static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value,
+ target_ulong write_mask)
+{
+ RISCVException ret;
+ target_ulong old_value;
/* execute combined read/write operation if it exists */
if (csr_ops[csrno].op) {
@@ -1817,6 +1878,92 @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
+}
+
+static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
+ Int128 *ret_value,
+ Int128 new_value,
+ Int128 write_mask)
+{
+ RISCVException ret;
+ Int128 old_value;
+
+ /* read old value */
+ ret = csr_ops[csrno].read128(env, csrno, &old_value);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* write value if writable and write mask set, otherwise drop writes */
+ if (int128_nz(write_mask)) {
+ new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
+ int128_and(new_value, write_mask));
+ if (csr_ops[csrno].write128) {
+ ret = csr_ops[csrno].write128(env, csrno, new_value);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+ } else if (csr_ops[csrno].write) {
+ /* avoids having to write wrappers for all registers */
+ ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+ }
+ }
+
+ /* return old value */
+ if (ret_value) {
+ *ret_value = old_value;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
+ Int128 *ret_value,
+ Int128 new_value, Int128 write_mask)
+{
+ RISCVException ret;
+ RISCVCPU *cpu = env_archcpu(env);
+
+ ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ if (csr_ops[csrno].read128) {
+ return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
+ }
+
+ /*
+ * Fall back to the 64-bit version for now, if no 128-bit alternative is
+ * defined at all.
+ * Note that some CSRs do not need to extend to MXLEN (their upper 64 bits
+ * are not significant); for those, this fallback handles the accesses correctly.
+ */
+ target_ulong old_value;
+ ret = riscv_csrrw_do64(env, csrno, &old_value,
+ int128_getlo(new_value),
+ int128_getlo(write_mask));
+ if (ret == RISCV_EXCP_NONE && ret_value) {
+ *ret_value = int128_make64(old_value);
+ }
+ return ret;
+}
+
/*
* Debugger support. If not in user mode, set env->debugger before the
* riscv_csrrw call and clear it after the call.
@@ -1878,8 +2025,10 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MHARTID] = { "mhartid", any, read_mhartid },
/* Machine Trap Setup */
- [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus },
- [CSR_MISA] = { "misa", any, read_misa, write_misa },
+ [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
+ read_mstatus_i128 },
+ [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL,
+ read_misa_i128 },
[CSR_MIDELEG] = { "mideleg", any, read_mideleg, write_mideleg },
[CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
[CSR_MIE] = { "mie", any, read_mie, write_mie },
@@ -1889,20 +2038,23 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },
/* Machine Trap Handling */
- [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch },
+ [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
+ read_mscratch_i128, write_mscratch_i128 },
[CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
[CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
[CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
[CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
/* Supervisor Trap Setup */
- [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus },
+ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL,
+ read_sstatus_i128 },
[CSR_SIE] = { "sie", smode, read_sie, write_sie },
[CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
[CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },
/* Supervisor Trap Handling */
- [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch },
+ [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
+ read_sscratch_i128, write_sscratch_i128 },
[CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
[CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
[CSR_STVAL] = { "stval", smode, read_stval, write_stval },
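
The 128-bit CSR path above performs the same read-modify-write as the 64-bit one, only with Int128 operations: new = (old & ~mask) | (new & mask), where csrrw supplies an all-ones mask, csrrs an all-ones new value with the bits to set as the mask, and csrrc a zero new value with the bits to clear as the mask. A sketch of that masked update, assuming a host __int128 in place of Int128:

    /* Masked CSR update as in riscv_csrrw_do128(); __int128 stands in for Int128. */
    static unsigned __int128 csr_masked_update(unsigned __int128 old_value,
                                               unsigned __int128 new_value,
                                               unsigned __int128 write_mask)
    {
        return (old_value & ~write_mask) | (new_value & write_mask);
    }
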
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index 881ab33392..a5429b92d4 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -280,6 +280,11 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
int bitsize = 16 << env->misa_mxl_max;
int i;
+ /* Until gdb knows about 128-bit registers */
+ if (bitsize > 64) {
+ bitsize = 64;
+ }
+
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">");
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index c15497e4a1..6cf6d6ce98 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -96,6 +96,9 @@ DEF_HELPER_FLAGS_1(fclass_h, TCG_CALL_NO_RWG_SE, tl, i64)
DEF_HELPER_2(csrr, tl, env, int)
DEF_HELPER_3(csrw, void, env, int, tl)
DEF_HELPER_4(csrrw, tl, env, int, tl, tl)
+DEF_HELPER_2(csrr_i128, tl, env, int)
+DEF_HELPER_4(csrw_i128, void, env, int, tl, tl)
+DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_2(sret, tl, env, tl)
DEF_HELPER_2(mret, tl, env, tl)
@@ -1101,3 +1104,9 @@ DEF_HELPER_5(vsext_vf2_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32)
+
+/* 128-bit integer multiplication and division */
+DEF_HELPER_5(divu_i128, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(divs_i128, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(remu_i128, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(rems_i128, tl, env, tl, tl, tl, tl)
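
The four helpers declared above return the low 64 bits of the 128-bit result as their tl return value and pass the upper 64 bits back through the new retxh field in CPURISCVState. A self-contained sketch of the unsigned-division case, using a host __int128 and an explicit out-parameter instead of env->retxh; it only illustrates the calling convention, not the actual QEMU implementation:

    #include <stdint.h>

    /* Low 64 bits of the quotient are returned; *reth receives the high 64. */
    static uint64_t sketch_divu_i128(uint64_t ul, uint64_t uh,
                                     uint64_t vl, uint64_t vh, uint64_t *reth)
    {
        unsigned __int128 u = ((unsigned __int128)uh << 64) | ul;
        unsigned __int128 v = ((unsigned __int128)vh << 64) | vl;
        /* RISC-V defines unsigned division by zero to yield an all-ones quotient. */
        unsigned __int128 q = v ? u / v : ~(unsigned __int128)0;

        *reth = (uint64_t)(q >> 64);
        return (uint64_t)q;
    }
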
diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode
index 2e9212663c..02c8f61b48 100644
--- a/target/riscv/insn16.decode
+++ b/target/riscv/insn16.decode
@@ -25,14 +25,17 @@
# Immediates:
%imm_ci 12:s1 2:5
%nzuimm_ciw 7:4 11:2 5:1 6:1 !function=ex_shift_2
+%uimm_cl_q 10:1 5:2 11:2 !function=ex_shift_4
%uimm_cl_d 5:2 10:3 !function=ex_shift_3
%uimm_cl_w 5:1 10:3 6:1 !function=ex_shift_2
%imm_cb 12:s1 5:2 2:1 10:2 3:2 !function=ex_shift_1
%imm_cj 12:s1 8:1 9:2 6:1 7:1 2:1 11:1 3:3 !function=ex_shift_1
%shimm_6bit 12:1 2:5 !function=ex_rvc_shifti
+%uimm_6bit_lq 2:4 12:1 6:1 !function=ex_shift_4
%uimm_6bit_ld 2:3 12:1 5:2 !function=ex_shift_3
%uimm_6bit_lw 2:2 12:1 4:3 !function=ex_shift_2
+%uimm_6bit_sq 7:4 11:2 !function=ex_shift_4
%uimm_6bit_sd 7:3 10:3 !function=ex_shift_3
%uimm_6bit_sw 7:2 9:4 !function=ex_shift_2
@@ -54,16 +57,20 @@
# Formats 16:
@cr .... ..... ..... .. &r rs2=%rs2_5 rs1=%rd %rd
@ci ... . ..... ..... .. &i imm=%imm_ci rs1=%rd %rd
+@cl_q ... . ..... ..... .. &i imm=%uimm_cl_q rs1=%rs1_3 rd=%rs2_3
@cl_d ... ... ... .. ... .. &i imm=%uimm_cl_d rs1=%rs1_3 rd=%rs2_3
@cl_w ... ... ... .. ... .. &i imm=%uimm_cl_w rs1=%rs1_3 rd=%rs2_3
@cs_2 ... ... ... .. ... .. &r rs2=%rs2_3 rs1=%rs1_3 rd=%rs1_3
+@cs_q ... ... ... .. ... .. &s imm=%uimm_cl_q rs1=%rs1_3 rs2=%rs2_3
@cs_d ... ... ... .. ... .. &s imm=%uimm_cl_d rs1=%rs1_3 rs2=%rs2_3
@cs_w ... ... ... .. ... .. &s imm=%uimm_cl_w rs1=%rs1_3 rs2=%rs2_3
@cj ... ........... .. &j imm=%imm_cj
@cb_z ... ... ... .. ... .. &b imm=%imm_cb rs1=%rs1_3 rs2=0
+@c_lqsp ... . ..... ..... .. &i imm=%uimm_6bit_lq rs1=2 %rd
@c_ldsp ... . ..... ..... .. &i imm=%uimm_6bit_ld rs1=2 %rd
@c_lwsp ... . ..... ..... .. &i imm=%uimm_6bit_lw rs1=2 %rd
+@c_sqsp ... . ..... ..... .. &s imm=%uimm_6bit_sq rs1=2 rs2=%rs2_5
@c_sdsp ... . ..... ..... .. &s imm=%uimm_6bit_sd rs1=2 rs2=%rs2_5
@c_swsp ... . ..... ..... .. &s imm=%uimm_6bit_sw rs1=2 rs2=%rs2_5
@c_li ... . ..... ..... .. &i imm=%imm_ci rs1=0 %rd
@@ -87,9 +94,15 @@
illegal 000 000 000 00 --- 00
addi 000 ... ... .. ... 00 @c_addi4spn
}
-fld 001 ... ... .. ... 00 @cl_d
+{
+ lq 001 ... ... .. ... 00 @cl_q
+ fld 001 ... ... .. ... 00 @cl_d
+}
lw 010 ... ... .. ... 00 @cl_w
-fsd 101 ... ... .. ... 00 @cs_d
+{
+ sq 101 ... ... .. ... 00 @cs_q
+ fsd 101 ... ... .. ... 00 @cs_d
+}
sw 110 ... ... .. ... 00 @cs_w
# *** RV32C and RV64C specific Standard Extension (Quadrant 0) ***
@@ -132,7 +145,10 @@ addw 100 1 11 ... 01 ... 01 @cs_2
# *** RV32/64C Standard Extension (Quadrant 2) ***
slli 000 . ..... ..... 10 @c_shift2
-fld 001 . ..... ..... 10 @c_ldsp
+{
+ lq 001 ... ... .. ... 10 @c_lqsp
+ fld 001 . ..... ..... 10 @c_ldsp
+}
{
illegal 010 - 00000 ----- 10 # c.lwsp, RES rd=0
lw 010 . ..... ..... 10 @c_lwsp
@@ -147,7 +163,10 @@ fld 001 . ..... ..... 10 @c_ldsp
jalr 100 1 ..... 00000 10 @c_jalr rd=1 # C.JALR
add 100 1 ..... ..... 10 @cr
}
-fsd 101 ...... ..... 10 @c_sdsp
+{
+ sq 101 ... ... .. ... 10 @c_sqsp
+ fsd 101 ...... ..... 10 @c_sdsp
+}
sw 110 . ..... ..... 10 @c_swsp
# *** RV32C and RV64C specific Standard Extension (Quadrant 2) ***
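
The new quadword immediates above are built the same way as the word and doubleword ones, just scaled by 16: decodetree concatenates the listed bit ranges most-significant first, and ex_shift_4 shifts the result left by four, consistent with ex_shift_2/ex_shift_3 used for the word and doubleword forms. As a worked example, a sketch of the c.lqsp offset described by %uimm_6bit_lq (insn[5:2] -> offset[9:6], insn[12] -> offset[5], insn[6] -> offset[4]); the helper name is invented for the sketch:

    #include <stdint.h>

    static uint32_t sketch_c_lqsp_offset(uint16_t insn)
    {
        uint32_t raw = ((insn >> 2) & 0xf) << 2    /* insn[5:2]       */
                     | ((insn >> 12) & 0x1) << 1   /* insn[12]        */
                     | ((insn >> 6) & 0x1);        /* insn[6]         */
        return raw << 4;                           /* ex_shift_4: *16 */
    }
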
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 8617307b29..5bbedc254c 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -22,6 +22,7 @@
%rs1 15:5
%rd 7:5
%sh5 20:5
+%sh6 20:6
%sh7 20:7
%csr 20:12
@@ -91,6 +92,9 @@
# Formats 64:
@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd
+# Formats 128:
+@sh6 ...... ...... ..... ... ..... ....... &shift shamt=%sh6 %rs1 %rd
+
# *** Privileged Instructions ***
ecall 000000000000 00000 000 00000 1110011
ebreak 000000000001 00000 000 00000 1110011
@@ -162,6 +166,20 @@ sllw 0000000 ..... ..... 001 ..... 0111011 @r
srlw 0000000 ..... ..... 101 ..... 0111011 @r
sraw 0100000 ..... ..... 101 ..... 0111011 @r
+# *** RV128I Base Instruction Set (in addition to RV64I) ***
+ldu ............ ..... 111 ..... 0000011 @i
+lq ............ ..... 010 ..... 0001111 @i
+sq ............ ..... 100 ..... 0100011 @s
+addid ............ ..... 000 ..... 1011011 @i
+sllid 000000 ...... ..... 001 ..... 1011011 @sh6
+srlid 000000 ...... ..... 101 ..... 1011011 @sh6
+sraid 010000 ...... ..... 101 ..... 1011011 @sh6
+addd 0000000 ..... ..... 000 ..... 1111011 @r
+subd 0100000 ..... ..... 000 ..... 1111011 @r
+slld 0000000 ..... ..... 001 ..... 1111011 @r
+srld 0000000 ..... ..... 101 ..... 1111011 @r
+srad 0100000 ..... ..... 101 ..... 1111011 @r
+
# *** RV32M Standard Extension ***
mul 0000001 ..... ..... 000 ..... 0110011 @r
mulh 0000001 ..... ..... 001 ..... 0110011 @r
@@ -179,6 +197,13 @@ divuw 0000001 ..... ..... 101 ..... 0111011 @r
remw 0000001 ..... ..... 110 ..... 0111011 @r
remuw 0000001 ..... ..... 111 ..... 0111011 @r
+# *** RV128M Standard Extension (in addition to RV64M) ***
+muld 0000001 ..... ..... 000 ..... 1111011 @r
+divd 0000001 ..... ..... 100 ..... 1111011 @r
+divud 0000001 ..... ..... 101 ..... 1111011 @r
+remd 0000001 ..... ..... 110 ..... 1111011 @r
+remud 0000001 ..... ..... 111 ..... 1111011 @r
+
# *** RV32A Standard Extension ***
lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld
sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 40fe132b04..86032fa9a7 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -162,65 +162,65 @@ static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_lr(ctx, a, MO_ALIGN | MO_TEQ);
+ return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ);
}
static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ));
+ return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
}
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
index c8d31907c5..810431a1d6 100644
--- a/target/riscv/insn_trans/trans_rvb.c.inc
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
@@ -86,43 +86,43 @@ static bool trans_cpop(DisasContext *ctx, arg_cpop *a)
static bool trans_andn(DisasContext *ctx, arg_andn *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_andc_tl);
+ return gen_logic(ctx, a, tcg_gen_andc_tl);
}
static bool trans_orn(DisasContext *ctx, arg_orn *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_orc_tl);
+ return gen_logic(ctx, a, tcg_gen_orc_tl);
}
static bool trans_xnor(DisasContext *ctx, arg_xnor *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_eqv_tl);
+ return gen_logic(ctx, a, tcg_gen_eqv_tl);
}
static bool trans_min(DisasContext *ctx, arg_min *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl, NULL);
}
static bool trans_max(DisasContext *ctx, arg_max *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl, NULL);
}
static bool trans_minu(DisasContext *ctx, arg_minu *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl, NULL);
}
static bool trans_maxu(DisasContext *ctx, arg_maxu *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl, NULL);
}
static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a)
@@ -156,7 +156,7 @@ static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
static bool trans_bset(DisasContext *ctx, arg_bset *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_bset);
+ return gen_shift(ctx, a, EXT_NONE, gen_bset, NULL);
}
static bool trans_bseti(DisasContext *ctx, arg_bseti *a)
@@ -178,7 +178,7 @@ static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
static bool trans_bclr(DisasContext *ctx, arg_bclr *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_bclr);
+ return gen_shift(ctx, a, EXT_NONE, gen_bclr, NULL);
}
static bool trans_bclri(DisasContext *ctx, arg_bclri *a)
@@ -200,7 +200,7 @@ static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
static bool trans_binv(DisasContext *ctx, arg_binv *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_binv);
+ return gen_shift(ctx, a, EXT_NONE, gen_binv, NULL);
}
static bool trans_binvi(DisasContext *ctx, arg_binvi *a)
@@ -218,7 +218,7 @@ static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
static bool trans_bext(DisasContext *ctx, arg_bext *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_bext);
+ return gen_shift(ctx, a, EXT_NONE, gen_bext, NULL);
}
static bool trans_bexti(DisasContext *ctx, arg_bexti *a)
@@ -248,7 +248,7 @@ static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
static bool trans_ror(DisasContext *ctx, arg_ror *a)
{
REQUIRE_ZBB(ctx);
- return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw);
+ return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw, NULL);
}
static void gen_roriw(TCGv ret, TCGv arg1, target_long shamt)
@@ -266,7 +266,7 @@ static bool trans_rori(DisasContext *ctx, arg_rori *a)
{
REQUIRE_ZBB(ctx);
return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
- tcg_gen_rotri_tl, gen_roriw);
+ tcg_gen_rotri_tl, gen_roriw, NULL);
}
static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
@@ -290,7 +290,7 @@ static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
static bool trans_rol(DisasContext *ctx, arg_rol *a)
{
REQUIRE_ZBB(ctx);
- return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw);
+ return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw, NULL);
}
static void gen_rev8_32(TCGv ret, TCGv src1)
@@ -357,7 +357,7 @@ GEN_SHADD(3)
static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \
{ \
REQUIRE_ZBA(ctx); \
- return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add); \
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add, NULL); \
}
GEN_TRANS_SHADD(1)
@@ -405,7 +405,7 @@ static bool trans_rorw(DisasContext *ctx, arg_rorw *a)
REQUIRE_64BIT(ctx);
REQUIRE_ZBB(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_NONE, gen_rorw);
+ return gen_shift(ctx, a, EXT_NONE, gen_rorw, NULL);
}
static bool trans_roriw(DisasContext *ctx, arg_roriw *a)
@@ -413,7 +413,7 @@ static bool trans_roriw(DisasContext *ctx, arg_roriw *a)
REQUIRE_64BIT(ctx);
REQUIRE_ZBB(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw, NULL);
}
static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
@@ -421,7 +421,7 @@ static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
REQUIRE_64BIT(ctx);
REQUIRE_ZBB(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_NONE, gen_rolw);
+ return gen_shift(ctx, a, EXT_NONE, gen_rolw, NULL);
}
#define GEN_SHADD_UW(SHAMT) \
@@ -447,7 +447,7 @@ static bool trans_sh##SHAMT##add_uw(DisasContext *ctx, \
{ \
REQUIRE_64BIT(ctx); \
REQUIRE_ZBA(ctx); \
- return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw); \
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw, NULL); \
}
GEN_TRANS_SHADD_UW(1)
@@ -466,7 +466,7 @@ static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_ZBA(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_add_uw);
+ return gen_arith(ctx, a, EXT_NONE, gen_add_uw, NULL);
}
static void gen_slli_uw(TCGv dest, TCGv src, target_long shamt)
@@ -478,13 +478,13 @@ static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_ZBA(ctx);
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw, NULL);
}
static bool trans_clmul(DisasContext *ctx, arg_clmul *a)
{
REQUIRE_ZBC(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul, NULL);
}
static void gen_clmulh(TCGv dst, TCGv src1, TCGv src2)
@@ -496,11 +496,11 @@ static void gen_clmulh(TCGv dst, TCGv src1, TCGv src2)
static bool trans_clmulh(DisasContext *ctx, arg_clmulr *a)
{
REQUIRE_ZBC(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_clmulh);
+ return gen_arith(ctx, a, EXT_NONE, gen_clmulh, NULL);
}
static bool trans_clmulr(DisasContext *ctx, arg_clmulh *a)
{
REQUIRE_ZBC(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr, NULL);
}
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
index 64fb0046f7..ed444b042a 100644
--- a/target/riscv/insn_trans/trans_rvd.c.inc
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -33,7 +33,7 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
}
addr = gen_pm_adjust_address(ctx, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
mark_fs_dirty(ctx);
return true;
@@ -54,7 +54,7 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
}
addr = gen_pm_adjust_address(ctx, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
return true;
}
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
index ecbf77ff9c..cebcb3f8f6 100644
--- a/target/riscv/insn_trans/trans_rvh.c.inc
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
@@ -121,14 +121,14 @@ static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_EXT(ctx, RVH);
- return do_hlv(ctx, a, MO_TEQ);
+ return do_hlv(ctx, a, MO_TEUQ);
}
static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_EXT(ctx, RVH);
- return do_hsv(ctx, a, MO_TEQ);
+ return do_hsv(ctx, a, MO_TEUQ);
}
#ifndef CONFIG_USER_ONLY
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index e51dbc41c5..3a0ae28fef 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -26,14 +26,14 @@ static bool trans_illegal(DisasContext *ctx, arg_empty *a)
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
- REQUIRE_64BIT(ctx);
- return trans_illegal(ctx, a);
+ REQUIRE_64_OR_128BIT(ctx);
+ return trans_illegal(ctx, a);
}
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
if (a->rd != 0) {
- tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm);
+ gen_set_gpri(ctx, a->rd, a->imm);
}
return true;
}
@@ -41,7 +41,7 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a)
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
if (a->rd != 0) {
- tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm + ctx->base.pc_next);
+ gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
}
return true;
}
@@ -82,13 +82,103 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
return true;
}
+static TCGCond gen_compare_i128(bool bz, TCGv rl,
+ TCGv al, TCGv ah, TCGv bl, TCGv bh,
+ TCGCond cond)
+{
+ TCGv rh = tcg_temp_new();
+ bool invert = false;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (bz) {
+ tcg_gen_or_tl(rl, al, ah);
+ } else {
+ tcg_gen_xor_tl(rl, al, bl);
+ tcg_gen_xor_tl(rh, ah, bh);
+ tcg_gen_or_tl(rl, rl, rh);
+ }
+ break;
+
+ case TCG_COND_GE:
+ case TCG_COND_LT:
+ if (bz) {
+ tcg_gen_mov_tl(rl, ah);
+ } else {
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
+ tcg_gen_xor_tl(rl, rh, ah);
+ tcg_gen_xor_tl(tmp, ah, bh);
+ tcg_gen_and_tl(rl, rl, tmp);
+ tcg_gen_xor_tl(rl, rh, rl);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case TCG_COND_LTU:
+ invert = true;
+ /* fallthrough */
+ case TCG_COND_GEU:
+ {
+ TCGv tmp = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+ TCGv one = tcg_constant_tl(1);
+
+ cond = TCG_COND_NE;
+ /* borrow in to second word */
+ tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
+ /* seed third word with 1, which will be result */
+ tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
+ tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ if (invert) {
+ cond = tcg_invert_cond(cond);
+ }
+
+ tcg_temp_free(rh);
+ return cond;
+}
+
+static void gen_setcond_i128(TCGv rl, TCGv rh,
+ TCGv src1l, TCGv src1h,
+ TCGv src2l, TCGv src2h,
+ TCGCond cond)
+{
+ cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
+ tcg_gen_setcondi_tl(cond, rl, rl, 0);
+ tcg_gen_movi_tl(rh, 0);
+}
+
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
TCGLabel *l = gen_new_label();
TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
- tcg_gen_brcond_tl(cond, src1, src2, l);
+ if (get_xl(ctx) == MXL_RV128) {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv tmp = tcg_temp_new();
+
+ cond = gen_compare_i128(a->rs2 == 0,
+ tmp, src1, src1h, src2, src2h, cond);
+ tcg_gen_brcondi_tl(cond, tmp, 0, l);
+
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_brcond_tl(cond, src1, src2, l);
+ }
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
gen_set_label(l); /* branch taken */
@@ -134,7 +224,7 @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
return gen_branch(ctx, a, TCG_COND_GEU);
}
-static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
+static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -151,6 +241,45 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
return true;
}
+/* Compute only 64-bit addresses so the existing address translation mechanism can be used */
+static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
+{
+ TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv destl = dest_gpr(ctx, a->rd);
+ TCGv desth = dest_gprh(ctx, a->rd);
+ TCGv addrl = tcg_temp_new();
+
+ tcg_gen_addi_tl(addrl, src1l, a->imm);
+
+ if ((memop & MO_SIZE) <= MO_64) {
+ tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
+ if (memop & MO_SIGN) {
+ tcg_gen_sari_tl(desth, destl, 63);
+ } else {
+ tcg_gen_movi_tl(desth, 0);
+ }
+ } else {
+ /* assume little-endian memory access for now */
+ tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_addi_tl(addrl, addrl, 8);
+ tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
+ }
+
+ gen_set_gpr128(ctx, a->rd, destl, desth);
+
+ tcg_temp_free(addrl);
+ return true;
+}
+
+static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
+{
+ if (get_xl(ctx) == MXL_RV128) {
+ return gen_load_i128(ctx, a, memop);
+ } else {
+ return gen_load_tl(ctx, a, memop);
+ }
+}
+
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
return gen_load(ctx, a, MO_SB);
@@ -166,6 +295,18 @@ static bool trans_lw(DisasContext *ctx, arg_lw *a)
return gen_load(ctx, a, MO_TESL);
}
+static bool trans_ld(DisasContext *ctx, arg_ld *a)
+{
+ REQUIRE_64_OR_128BIT(ctx);
+ return gen_load(ctx, a, MO_TESQ);
+}
+
+static bool trans_lq(DisasContext *ctx, arg_lq *a)
+{
+ REQUIRE_128BIT(ctx);
+ return gen_load(ctx, a, MO_TEUO);
+}
+
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
return gen_load(ctx, a, MO_UB);
@@ -176,7 +317,19 @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
return gen_load(ctx, a, MO_TEUW);
}
-static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
+static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
+{
+ REQUIRE_64_OR_128BIT(ctx);
+ return gen_load(ctx, a, MO_TEUL);
+}
+
+static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
+{
+ REQUIRE_128BIT(ctx);
+ return gen_load(ctx, a, MO_TEUQ);
+}
+
+static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
@@ -192,6 +345,37 @@ static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
return true;
}
+static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
+{
+ TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv addrl = tcg_temp_new();
+
+ tcg_gen_addi_tl(addrl, src1l, a->imm);
+
+ if ((memop & MO_SIZE) <= MO_64) {
+ tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
+ } else {
+ /* little-endian memory access assumed for now */
+ tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_addi_tl(addrl, addrl, 8);
+ tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
+ }
+
+ tcg_temp_free(addrl);
+ return true;
+}
+
+static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
+{
+ if (get_xl(ctx) == MXL_RV128) {
+ return gen_store_i128(ctx, a, memop);
+ } else {
+ return gen_store_tl(ctx, a, memop);
+ }
+}
+
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
return gen_store(ctx, a, MO_SB);
@@ -207,27 +391,50 @@ static bool trans_sw(DisasContext *ctx, arg_sw *a)
return gen_store(ctx, a, MO_TESL);
}
-static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
+static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
- REQUIRE_64BIT(ctx);
- return gen_load(ctx, a, MO_TEUL);
+ REQUIRE_64_OR_128BIT(ctx);
+ return gen_store(ctx, a, MO_TEUQ);
}
-static bool trans_ld(DisasContext *ctx, arg_ld *a)
+static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
- REQUIRE_64BIT(ctx);
- return gen_load(ctx, a, MO_TEQ);
+ REQUIRE_128BIT(ctx);
+ return gen_store(ctx, a, MO_TEUO);
}
-static bool trans_sd(DisasContext *ctx, arg_sd *a)
+static bool trans_addd(DisasContext *ctx, arg_addd *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
+}
+
+static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
- REQUIRE_64BIT(ctx);
- return gen_store(ctx, a, MO_TEQ);
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
+}
+
+static bool trans_subd(DisasContext *ctx, arg_subd *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
+}
+
+static void gen_addi2_i128(TCGv retl, TCGv reth,
+ TCGv srcl, TCGv srch, target_long imm)
+{
+ TCGv imml = tcg_constant_tl(imm);
+ TCGv immh = tcg_constant_tl(-(imm < 0));
+ tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
@@ -235,39 +442,64 @@ static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}
+static void gen_slt_i128(TCGv retl, TCGv reth,
+ TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
+{
+ gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
+}
+
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}
+static void gen_sltu_i128(TCGv retl, TCGv reth,
+ TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
+{
+ gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
+}
+
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
- return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
- return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_xori_tl);
+ return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_ori_tl);
+ return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_andi_tl);
+ return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
+}
+
+static void gen_slli_i128(TCGv retl, TCGv reth,
+ TCGv src1l, TCGv src1h,
+ target_long shamt)
+{
+ if (shamt >= 64) {
+ tcg_gen_shli_tl(reth, src1l, shamt - 64);
+ tcg_gen_movi_tl(retl, 0);
+ } else {
+ tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
+ tcg_gen_shli_tl(retl, src1l, shamt);
+ }
}
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
- return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
@@ -275,10 +507,23 @@ static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}
+static void gen_srli_i128(TCGv retl, TCGv reth,
+ TCGv src1l, TCGv src1h,
+ target_long shamt)
+{
+ if (shamt >= 64) {
+ tcg_gen_shri_tl(retl, src1h, shamt - 64);
+ tcg_gen_movi_tl(reth, 0);
+ } else {
+ tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
+ tcg_gen_shri_tl(reth, src1h, shamt);
+ }
+}
+
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
- tcg_gen_shri_tl, gen_srliw);
+ tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
@@ -286,125 +531,287 @@ static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}
+static void gen_srai_i128(TCGv retl, TCGv reth,
+ TCGv src1l, TCGv src1h,
+ target_long shamt)
+{
+ if (shamt >= 64) {
+ tcg_gen_sari_tl(retl, src1h, shamt - 64);
+ tcg_gen_sari_tl(reth, src1h, 63);
+ } else {
+ tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
+ tcg_gen_sari_tl(reth, src1h, shamt);
+ }
+}
+
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
- tcg_gen_sari_tl, gen_sraiw);
+ tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
static bool trans_add(DisasContext *ctx, arg_add *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
+}
+
+static void gen_sll_i128(TCGv destl, TCGv desth,
+ TCGv src1l, TCGv src1h, TCGv shamt)
+{
+ TCGv ls = tcg_temp_new();
+ TCGv rs = tcg_temp_new();
+ TCGv hs = tcg_temp_new();
+ TCGv ll = tcg_temp_new();
+ TCGv lr = tcg_temp_new();
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_andi_tl(hs, shamt, 64);
+ tcg_gen_andi_tl(ls, shamt, 63);
+ tcg_gen_neg_tl(shamt, shamt);
+ tcg_gen_andi_tl(rs, shamt, 63);
+
+ tcg_gen_shl_tl(ll, src1l, ls);
+ tcg_gen_shl_tl(h0, src1h, ls);
+ tcg_gen_shr_tl(lr, src1l, rs);
+ tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
+ tcg_gen_or_tl(h1, h0, lr);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
+ tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
+
+ tcg_temp_free(ls);
+ tcg_temp_free(rs);
+ tcg_temp_free(hs);
+ tcg_temp_free(ll);
+ tcg_temp_free(lr);
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
}
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
- return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
- return gen_arith(ctx, a, EXT_SIGN, gen_slt);
+ return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
- return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
+ return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
-static bool trans_xor(DisasContext *ctx, arg_xor *a)
+static void gen_srl_i128(TCGv destl, TCGv desth,
+ TCGv src1l, TCGv src1h, TCGv shamt)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_xor_tl);
+ TCGv ls = tcg_temp_new();
+ TCGv rs = tcg_temp_new();
+ TCGv hs = tcg_temp_new();
+ TCGv ll = tcg_temp_new();
+ TCGv lr = tcg_temp_new();
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_andi_tl(hs, shamt, 64);
+ tcg_gen_andi_tl(rs, shamt, 63);
+ tcg_gen_neg_tl(shamt, shamt);
+ tcg_gen_andi_tl(ls, shamt, 63);
+
+ tcg_gen_shr_tl(lr, src1l, rs);
+ tcg_gen_shr_tl(h1, src1h, rs);
+ tcg_gen_shl_tl(ll, src1h, ls);
+ tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
+ tcg_gen_or_tl(h0, ll, lr);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
+ tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
+
+ tcg_temp_free(ls);
+ tcg_temp_free(rs);
+ tcg_temp_free(hs);
+ tcg_temp_free(ll);
+ tcg_temp_free(lr);
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
}
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
- return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
+}
+
+static void gen_sra_i128(TCGv destl, TCGv desth,
+ TCGv src1l, TCGv src1h, TCGv shamt)
+{
+ TCGv ls = tcg_temp_new();
+ TCGv rs = tcg_temp_new();
+ TCGv hs = tcg_temp_new();
+ TCGv ll = tcg_temp_new();
+ TCGv lr = tcg_temp_new();
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_andi_tl(hs, shamt, 64);
+ tcg_gen_andi_tl(rs, shamt, 63);
+ tcg_gen_neg_tl(shamt, shamt);
+ tcg_gen_andi_tl(ls, shamt, 63);
+
+ tcg_gen_shr_tl(lr, src1l, rs);
+ tcg_gen_sar_tl(h1, src1h, rs);
+ tcg_gen_shl_tl(ll, src1h, ls);
+ tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
+ tcg_gen_or_tl(h0, ll, lr);
+ tcg_gen_sari_tl(lr, src1h, 63);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
+ tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
+
+ tcg_temp_free(ls);
+ tcg_temp_free(rs);
+ tcg_temp_free(hs);
+ tcg_temp_free(ll);
+ tcg_temp_free(lr);
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
}
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
- return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
+}
+
+static bool trans_xor(DisasContext *ctx, arg_xor *a)
+{
+ return gen_logic(ctx, a, tcg_gen_xor_tl);
}
static bool trans_or(DisasContext *ctx, arg_or *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_or_tl);
+ return gen_logic(ctx, a, tcg_gen_or_tl);
}
static bool trans_and(DisasContext *ctx, arg_and *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_and_tl);
+ return gen_logic(ctx, a, tcg_gen_and_tl);
}
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
+}
+
+static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
+}
+
+static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
+}
+
+static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}
static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
+static bool trans_slld(DisasContext *ctx, arg_slld *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
+}
+
+static bool trans_srld(DisasContext *ctx, arg_srld *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
+}
+
+static bool trans_srad(DisasContext *ctx, arg_srad *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
+}
+
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
/* FENCE is a full memory barrier. */
@@ -474,20 +881,78 @@ static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
return do_csr_post(ctx);
}
-static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
+static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
- TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv destl = dest_gpr(ctx, rd);
+ TCGv desth = dest_gprh(ctx, rd);
+ TCGv_i32 csr = tcg_constant_i32(rc);
- /*
- * If rd == 0, the insn shall not read the csr, nor cause any of the
- * side effects that might occur on a csr read.
- */
- if (a->rd == 0) {
- return do_csrw(ctx, a->csr, src);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrr_i128(destl, cpu_env, csr);
+ tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_set_gpr128(ctx, rd, destl, desth);
+ return do_csr_post(ctx);
+}
+
+static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
+{
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
}
+ gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
+ return do_csr_post(ctx);
+}
+
+static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
+ TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
+{
+ TCGv destl = dest_gpr(ctx, rd);
+ TCGv desth = dest_gprh(ctx, rd);
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
+ tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_set_gpr128(ctx, rd, destl, desth);
+ return do_csr_post(ctx);
+}
- TCGv mask = tcg_constant_tl(-1);
- return do_csrrw(ctx, a->rd, a->csr, src, mask);
+static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
+{
+ if (get_xl(ctx) < MXL_RV128) {
+ TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw(ctx, a->csr, src);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
+ } else {
+ TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv srch = get_gprh(ctx, a->rs1);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw_i128(ctx, a->csr, srcl, srch);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
+ }
}
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
@@ -499,13 +964,24 @@ static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
+ TCGv maskh = get_gprh(ctx, a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
}
-
- TCGv ones = tcg_constant_tl(-1);
- TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
- return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
@@ -517,28 +993,54 @@ static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
- }
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
- TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
- return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
+
+ TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
+ TCGv maskh = get_gprh(ctx, a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr,
+ ctx->zero, ctx->zero, maskl, maskh);
+ }
}
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
- TCGv src = tcg_constant_tl(a->rs1);
+ if (get_xl(ctx) < MXL_RV128) {
+ TCGv src = tcg_constant_tl(a->rs1);
- /*
- * If rd == 0, the insn shall not read the csr, nor cause any of the
- * side effects that might occur on a csr read.
- */
- if (a->rd == 0) {
- return do_csrw(ctx, a->csr, src);
- }
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw(ctx, a->csr, src);
+ }
- TCGv mask = tcg_constant_tl(-1);
- return do_csrrw(ctx, a->rd, a->csr, src, mask);
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
+ } else {
+ TCGv src = tcg_constant_tl(a->rs1);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw_i128(ctx, a->csr, src, ctx->zero);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
+ }
}
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
@@ -550,16 +1052,26 @@ static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
- }
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
- TCGv ones = tcg_constant_tl(-1);
- TCGv mask = tcg_constant_tl(a->rs1);
- return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
+ }
}
-static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
+static bool trans_csrrci(DisasContext *ctx, arg_csrrci * a)
{
/*
* If rs1 == 0, the insn shall not write to the csr at all, nor
@@ -568,10 +1080,20 @@ static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
- }
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
- TCGv mask = tcg_constant_tl(a->rs1);
- return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
+
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr,
+ ctx->zero, ctx->zero, mask, ctx->zero);
+ }
}
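For readers following the rv128 arithmetic above: each 128-bit GPR is modelled as a low/high pair of 64-bit TCG values, so an immediate shift splits into a shamt >= 64 case and an extract2 case. The following standalone sketch (host C with unsigned __int128, not QEMU code; slli128 is an illustrative name) cross-checks that decomposition for the logical left shift:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t lo, hi; } u128;

/* Logical left shift of a lo/hi pair for shamt in [1, 127], mirroring the
 * two cases of gen_slli_i128: shamt >= 64 versus shamt < 64. */
static u128 slli128(u128 a, unsigned shamt)
{
    u128 r;
    if (shamt >= 64) {
        r.hi = a.lo << (shamt - 64);
        r.lo = 0;
    } else {
        /* like extract2: new hi = (hi << shamt) | (lo >> (64 - shamt)) */
        r.hi = (a.hi << shamt) | (a.lo >> (64 - shamt));
        r.lo = a.lo << shamt;
    }
    return r;
}

int main(void)
{
    u128 a = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
    unsigned __int128 ref = ((unsigned __int128)a.hi << 64) | a.lo;

    for (unsigned s = 1; s < 128; s++) {
        u128 r = slli128(a, s);
        unsigned __int128 e = ref << s;
        assert(r.lo == (uint64_t)e && r.hi == (uint64_t)(e >> 64));
    }
    puts("slli128 agrees with __int128 for shift amounts 1..127");
    return 0;
}

gen_srli_i128 and gen_srai_i128 are the mirror image, pulling bits out of the high word instead, and the variable-amount register shifts (gen_sll_i128 and friends) replace the if with movconds because the shift amount is only known at run time.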
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
index 2af0e5c139..16b029edf0 100644
--- a/target/riscv/insn_trans/trans_rvm.c.inc
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
@@ -18,11 +18,79 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+static void gen_mulhu_i128(TCGv r2, TCGv r3, TCGv al, TCGv ah, TCGv bl, TCGv bh)
+{
+ TCGv tmpl = tcg_temp_new();
+ TCGv tmph = tcg_temp_new();
+ TCGv r0 = tcg_temp_new();
+ TCGv r1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_mulu2_tl(r0, r1, al, bl);
+
+ tcg_gen_mulu2_tl(tmpl, tmph, al, bh);
+ tcg_gen_add2_tl(r1, r2, r1, zero, tmpl, tmph);
+ tcg_gen_mulu2_tl(tmpl, tmph, ah, bl);
+ tcg_gen_add2_tl(r1, tmph, r1, r2, tmpl, tmph);
+ /* Overflow detection into r3 */
+ tcg_gen_setcond_tl(TCG_COND_LTU, r3, tmph, r2);
+
+ tcg_gen_mov_tl(r2, tmph);
+
+ tcg_gen_mulu2_tl(tmpl, tmph, ah, bh);
+ tcg_gen_add2_tl(r2, r3, r2, r3, tmpl, tmph);
+
+ tcg_temp_free(tmpl);
+ tcg_temp_free(tmph);
+}
+
+static void gen_mul_i128(TCGv rl, TCGv rh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ TCGv tmpl = tcg_temp_new();
+ TCGv tmph = tcg_temp_new();
+ TCGv tmpx = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_mulu2_tl(rl, rh, rs1l, rs2l);
+ tcg_gen_mulu2_tl(tmpl, tmph, rs1l, rs2h);
+ tcg_gen_add2_tl(rh, tmpx, rh, zero, tmpl, tmph);
+ tcg_gen_mulu2_tl(tmpl, tmph, rs1h, rs2l);
+ tcg_gen_add2_tl(rh, tmph, rh, tmpx, tmpl, tmph);
+
+ tcg_temp_free(tmpl);
+ tcg_temp_free(tmph);
+ tcg_temp_free(tmpx);
+}
static bool trans_mul(DisasContext *ctx, arg_mul *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, gen_mul_i128);
+}
+
+static void gen_mulh_i128(TCGv rl, TCGv rh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ TCGv t0l = tcg_temp_new();
+ TCGv t0h = tcg_temp_new();
+ TCGv t1l = tcg_temp_new();
+ TCGv t1h = tcg_temp_new();
+
+ gen_mulhu_i128(rl, rh, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_sari_tl(t0h, rs1h, 63);
+ tcg_gen_and_tl(t0l, t0h, rs2l);
+ tcg_gen_and_tl(t0h, t0h, rs2h);
+ tcg_gen_sari_tl(t1h, rs2h, 63);
+ tcg_gen_and_tl(t1l, t1h, rs1l);
+ tcg_gen_and_tl(t1h, t1h, rs1h);
+ tcg_gen_sub2_tl(t0l, t0h, rl, rh, t0l, t0h);
+ tcg_gen_sub2_tl(rl, rh, t0l, t0h, t1l, t1h);
+
+ tcg_temp_free(t0l);
+ tcg_temp_free(t0h);
+ tcg_temp_free(t1l);
+ tcg_temp_free(t1h);
}
static void gen_mulh(TCGv ret, TCGv s1, TCGv s2)
@@ -42,7 +110,25 @@ static void gen_mulh_w(TCGv ret, TCGv s1, TCGv s2)
static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w);
+ return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w,
+ gen_mulh_i128);
+}
+
+static void gen_mulhsu_i128(TCGv rl, TCGv rh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ TCGv t0l = tcg_temp_new();
+ TCGv t0h = tcg_temp_new();
+
+ gen_mulhu_i128(rl, rh, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_sari_tl(t0h, rs1h, 63);
+ tcg_gen_and_tl(t0l, t0h, rs2l);
+ tcg_gen_and_tl(t0h, t0h, rs2h);
+ tcg_gen_sub2_tl(rl, rh, rl, rh, t0l, t0h);
+
+ tcg_temp_free(t0l);
+ tcg_temp_free(t0h);
}
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
@@ -76,7 +162,8 @@ static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2)
static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w);
+ return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w,
+ gen_mulhsu_i128);
}
static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2)
@@ -91,7 +178,15 @@ static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
{
REQUIRE_EXT(ctx, RVM);
/* gen_mulh_w works for either sign as input. */
- return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w);
+ return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w,
+ gen_mulhu_i128);
+}
+
+static void gen_div_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_divs_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
@@ -130,7 +225,14 @@ static void gen_div(TCGv ret, TCGv source1, TCGv source2)
static bool trans_div(DisasContext *ctx, arg_div *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_SIGN, gen_div);
+ return gen_arith(ctx, a, EXT_SIGN, gen_div, gen_div_i128);
+}
+
+static void gen_divu_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_divu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
@@ -158,7 +260,14 @@ static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
static bool trans_divu(DisasContext *ctx, arg_divu *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_ZERO, gen_divu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu, gen_divu_i128);
+}
+
+static void gen_rem_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_rems_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
@@ -199,7 +308,14 @@ static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
static bool trans_rem(DisasContext *ctx, arg_rem *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_SIGN, gen_rem);
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem, gen_rem_i128);
+}
+
+static void gen_remu_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_remu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
@@ -227,45 +343,85 @@ static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
static bool trans_remu(DisasContext *ctx, arg_remu *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_ZERO, gen_remu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu, gen_remu_i128);
}
static bool trans_mulw(DisasContext *ctx, arg_mulw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, NULL);
}
static bool trans_divw(DisasContext *ctx, arg_divw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_SIGN, gen_div);
+ return gen_arith(ctx, a, EXT_SIGN, gen_div, NULL);
}
static bool trans_divuw(DisasContext *ctx, arg_divuw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_ZERO, gen_divu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu, NULL);
}
static bool trans_remw(DisasContext *ctx, arg_remw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_SIGN, gen_rem);
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem, NULL);
}
static bool trans_remuw(DisasContext *ctx, arg_remuw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_ZERO, gen_remu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu, NULL);
+}
+
+static bool trans_muld(DisasContext *ctx, arg_muld *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_mul_tl, NULL);
+}
+
+static bool trans_divd(DisasContext *ctx, arg_divd *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_SIGN, gen_div, NULL);
+}
+
+static bool trans_divud(DisasContext *ctx, arg_divud *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu, NULL);
+}
+
+static bool trans_remd(DisasContext *ctx, arg_remd *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem, NULL);
+}
+
+static bool trans_remud(DisasContext *ctx, arg_remud *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu, NULL);
}
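The 128-bit multiply helpers in the hunk above use the classic split-and-carry scheme: four partial products of the 64-bit halves, with the middle column's carry folded into the top word. A standalone sketch of the same scheme one size down, computing the high 64 bits of a 64x64 product from 32-bit halves and checking it against the compiler's __int128 (host C only; mulhu64 is an illustrative name, not a QEMU function):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* High 64 bits of a 64x64 unsigned product, built from 32-bit halves the
 * same way gen_mulhu_i128 builds the high 128 bits from 64-bit halves. */
static uint64_t mulhu64(uint64_t a, uint64_t b)
{
    uint64_t al = (uint32_t)a, ah = a >> 32;
    uint64_t bl = (uint32_t)b, bh = b >> 32;

    uint64_t ll = al * bl;
    uint64_t lh = al * bh;
    uint64_t hl = ah * bl;
    uint64_t hh = ah * bh;

    /* Middle column: its carry propagates into the top partial product. */
    uint64_t mid = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    return hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
}

int main(void)
{
    uint64_t tests[] = { 0, 1, 3, 0xffffffffULL, 0x100000000ULL,
                         0xdeadbeefcafebabeULL, UINT64_MAX };
    size_t n = sizeof(tests) / sizeof(tests[0]);

    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < n; j++) {
            unsigned __int128 ref = (unsigned __int128)tests[i] * tests[j];
            assert(mulhu64(tests[i], tests[j]) == (uint64_t)(ref >> 64));
        }
    }
    puts("mulhu64 matches the __int128 reference");
    return 0;
}

gen_mulh_i128 and gen_mulhsu_i128 then correct this unsigned high part for signed operands by subtracting the other operand wherever a sign bit is set, which is what the sari/and/sub2 sequences above implement.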
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 5e3f7fdb77..6c285c958b 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -2254,7 +2254,8 @@ GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
@@ -2292,7 +2293,8 @@ GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
@@ -2321,7 +2323,8 @@ GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}
@@ -2359,7 +2362,8 @@ GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_dd(s, a->rd, a->rs2, a->vm);
}
@@ -2609,16 +2613,27 @@ GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
{
return require_rvv(s) &&
- require_scale_rvf(s) &&
- (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
-#define GEN_OPFV_WIDEN_TRANS(NAME, HELPER, FRM) \
+static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_widen_check(s, a) &&
+ require_rvf(s);
+}
+
+static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_widen_check(s, a) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8);
+}
+
+#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{ \
- if (opfv_widen_check(s, a)) { \
+ if (CHECK(s, a)) { \
if (FRM != RISCV_FRM_DYN) { \
gen_set_rm(s, RISCV_FRM_DYN); \
} \
@@ -2645,12 +2660,17 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
return false; \
}
-GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_DYN)
-GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, vfwcvt_x_f_v, RISCV_FRM_DYN)
-GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, vfwcvt_f_f_v, RISCV_FRM_DYN)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
+ RISCV_FRM_DYN)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
+ RISCV_FRM_DYN)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
+ RISCV_FRM_DYN)
/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
-GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_RTZ)
-GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, vfwcvt_x_f_v, RISCV_FRM_RTZ)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
+ RISCV_FRM_RTZ)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
+ RISCV_FRM_RTZ)
static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
{
@@ -2699,17 +2719,29 @@ GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)
static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
- (s->sew != MO_64) &&
vext_check_isa_ill(s) &&
/* OPFV narrowing instructions ignore vs1 check */
vext_check_sd(s, a->rd, a->rs2, a->vm);
}
-#define GEN_OPFV_NARROW_TRANS(NAME, HELPER, FRM) \
+static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_narrow_check(s, a) &&
+ require_rvf(s) &&
+ (s->sew != MO_64);
+}
+
+static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_narrow_check(s, a) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8);
+}
+
+#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{ \
- if (opfv_narrow_check(s, a)) { \
+ if (CHECK(s, a)) { \
if (FRM != RISCV_FRM_DYN) { \
gen_set_rm(s, RISCV_FRM_DYN); \
} \
@@ -2736,11 +2768,15 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
return false; \
}
-GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, vfncvt_f_xu_w, RISCV_FRM_DYN)
-GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, vfncvt_f_x_w, RISCV_FRM_DYN)
-GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, vfncvt_f_f_w, RISCV_FRM_DYN)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
+ RISCV_FRM_DYN)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
+ RISCV_FRM_DYN)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+ RISCV_FRM_DYN)
/* Reuse the helper function from vfncvt.f.f.w */
-GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, vfncvt_f_f_w, RISCV_FRM_ROD)
+GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+ RISCV_FRM_ROD)
static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
{
diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c
new file mode 100644
index 0000000000..7bf115b85e
--- /dev/null
+++ b/target/riscv/m128_helper.c
@@ -0,0 +1,109 @@
+/*
+ * RISC-V Emulation Helpers for QEMU.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/main-loop.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+target_ulong HELPER(divu_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong ql, qh;
+ Int128 q;
+
+ if (vl == 0 && vh == 0) { /* Handle special behavior on div by zero */
+ ql = ~0x0;
+ qh = ~0x0;
+ } else {
+ q = int128_divu(int128_make128(ul, uh), int128_make128(vl, vh));
+ ql = int128_getlo(q);
+ qh = int128_gethi(q);
+ }
+
+ env->retxh = qh;
+ return ql;
+}
+
+target_ulong HELPER(remu_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong rl, rh;
+ Int128 r;
+
+ if (vl == 0 && vh == 0) {
+ rl = ul;
+ rh = uh;
+ } else {
+ r = int128_remu(int128_make128(ul, uh), int128_make128(vl, vh));
+ rl = int128_getlo(r);
+ rh = int128_gethi(r);
+ }
+
+ env->retxh = rh;
+ return rl;
+}
+
+target_ulong HELPER(divs_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong qh, ql;
+ Int128 q;
+
+ if (vl == 0 && vh == 0) { /* Div by zero check */
+ ql = ~0x0;
+ qh = ~0x0;
+ } else if (uh == (1ULL << (TARGET_LONG_BITS - 1)) && ul == 0 &&
+ vh == ~0x0 && vl == ~0x0) {
+ /* Signed div overflow check (-2**127 / -1) */
+ ql = ul;
+ qh = uh;
+ } else {
+ q = int128_divs(int128_make128(ul, uh), int128_make128(vl, vh));
+ ql = int128_getlo(q);
+ qh = int128_gethi(q);
+ }
+
+ env->retxh = qh;
+ return ql;
+}
+
+target_ulong HELPER(rems_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong rh, rl;
+ Int128 r;
+
+ if (vl == 0 && vh == 0) {
+ rl = ul;
+ rh = uh;
+ } else {
+ r = int128_rems(int128_make128(ul, uh), int128_make128(vl, vh));
+ rl = int128_getlo(r);
+ rh = int128_gethi(r);
+ }
+
+ env->retxh = rh;
+ return rl;
+}
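The division helpers in the new file above reproduce the RISC-V M-extension corner cases at 128 bits: dividing by zero returns an all-ones quotient and leaves the dividend as the remainder, and the signed overflow case (-2^127 / -1) returns the dividend with a zero remainder. A standalone host-C sketch of those rules using the compiler's __int128 (illustrative only; riscv_div and riscv_rem are not QEMU functions):

#include <assert.h>
#include <stdio.h>

typedef __int128 i128;

#define I128_MIN (-(((i128)1 << 126)) * 2)   /* -2^127 without overflow */

static i128 riscv_div(i128 a, i128 b)
{
    if (b == 0) {
        return -1;                  /* quotient is all ones */
    }
    if (a == I128_MIN && b == -1) {
        return a;                   /* signed overflow: result is the dividend */
    }
    return a / b;                   /* C truncates toward zero, as RISC-V does */
}

static i128 riscv_rem(i128 a, i128 b)
{
    if (b == 0) {
        return a;                   /* remainder is the dividend */
    }
    if (a == I128_MIN && b == -1) {
        return 0;
    }
    return a % b;
}

int main(void)
{
    assert(riscv_div(42, 0) == -1 && riscv_rem(42, 0) == 42);
    assert(riscv_div(I128_MIN, -1) == I128_MIN && riscv_rem(I128_MIN, -1) == 0);
    assert(riscv_div(-7, 2) == -3 && riscv_rem(-7, 2) == -1);
    puts("RV128 div/rem corner cases behave as specified");
    return 0;
}

The unsigned helpers divu_i128 and remu_i128 need only the divide-by-zero rule, as the code above shows.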
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index ad8248ebfd..13b9ab375b 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -164,6 +164,27 @@ static const VMStateDescription vmstate_pointermasking = {
}
};
+static bool rv128_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+
+ return env->misa_mxl_max == MXL_RV128;
+}
+
+static const VMStateDescription vmstate_rv128 = {
+ .name = "cpu/rv128",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = rv128_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
+ VMSTATE_UINT64(env.mscratchh, RISCVCPU),
+ VMSTATE_UINT64(env.sscratchh, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_riscv_cpu = {
.name = "cpu",
.version_id = 3,
@@ -218,6 +239,7 @@ const VMStateDescription vmstate_riscv_cpu = {
&vmstate_hyper,
&vmstate_vector,
&vmstate_pointermasking,
+ &vmstate_rv128,
NULL
}
};
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
index d5e0bc93ea..a32158da93 100644
--- a/target/riscv/meson.build
+++ b/target/riscv/meson.build
@@ -18,6 +18,7 @@ riscv_ss.add(files(
'vector_helper.c',
'bitmanip_helper.c',
'translate.c',
+ 'm128_helper.c'
))
riscv_softmmu_ss = ss.source_set()
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index ee7c24efe7..6f040f2fb9 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -69,6 +69,50 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
return val;
}
+target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
+{
+ Int128 rv = int128_zero();
+ RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
+ int128_zero(),
+ int128_zero());
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+
+ env->retxh = int128_gethi(rv);
+ return int128_getlo(rv);
+}
+
+void helper_csrw_i128(CPURISCVState *env, int csr,
+ target_ulong srcl, target_ulong srch)
+{
+ RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
+ int128_make128(srcl, srch),
+ UINT128_MAX);
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+}
+
+target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
+ target_ulong srcl, target_ulong srch,
+ target_ulong maskl, target_ulong maskh)
+{
+ Int128 rv = int128_zero();
+ RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
+ int128_make128(srcl, srch),
+ int128_make128(maskl, maskh));
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+
+ env->retxh = int128_gethi(rv);
+ return int128_getlo(rv);
+}
+
#ifndef CONFIG_USER_ONLY
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
@@ -146,7 +190,8 @@ target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
uint64_t mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
- if (!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
+ if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+ !pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 5df6c0d800..615048ec87 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -33,7 +33,7 @@
#include "internals.h"
/* global register indices */
-static TCGv cpu_gpr[32], cpu_pc, cpu_vl, cpu_vstart;
+static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
@@ -59,6 +59,7 @@ typedef struct DisasContext {
/* pc_succ_insn points to the instruction following base.pc_next */
target_ulong pc_succ_insn;
target_ulong priv_ver;
+ RISCVMXL misa_mxl_max;
RISCVMXL xl;
uint32_t misa_ext;
uint32_t opcode;
@@ -141,6 +142,13 @@ static inline int get_olen(DisasContext *ctx)
return 16 << get_ol(ctx);
}
+/* The maximum register length */
+#ifdef TARGET_RISCV32
+#define get_xl_max(ctx) MXL_RV32
+#else
+#define get_xl_max(ctx) ((ctx)->misa_mxl_max)
+#endif
+
/*
* RISC-V requires NaN-boxing of narrower width floating point values.
* This applies when a 32-bit value is assigned to a 64-bit FP register.
@@ -200,6 +208,9 @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
static void gen_exception_illegal(DisasContext *ctx)
{
+ tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env,
+ offsetof(CPURISCVState, bins));
+
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
@@ -260,6 +271,7 @@ static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
}
break;
case MXL_RV64:
+ case MXL_RV128:
break;
default:
g_assert_not_reached();
@@ -267,6 +279,15 @@ static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
return cpu_gpr[reg_num];
}
+static TCGv get_gprh(DisasContext *ctx, int reg_num)
+{
+ assert(get_xl(ctx) == MXL_RV128);
+ if (reg_num == 0) {
+ return ctx->zero;
+ }
+ return cpu_gprh[reg_num];
+}
+
static TCGv dest_gpr(DisasContext *ctx, int reg_num)
{
if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
@@ -275,6 +296,14 @@ static TCGv dest_gpr(DisasContext *ctx, int reg_num)
return cpu_gpr[reg_num];
}
+static TCGv dest_gprh(DisasContext *ctx, int reg_num)
+{
+ if (reg_num == 0) {
+ return temp_new(ctx);
+ }
+ return cpu_gprh[reg_num];
+}
+
static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
{
if (reg_num != 0) {
@@ -283,11 +312,46 @@ static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
break;
case MXL_RV64:
+ case MXL_RV128:
tcg_gen_mov_tl(cpu_gpr[reg_num], t);
break;
default:
g_assert_not_reached();
}
+
+ if (get_xl_max(ctx) == MXL_RV128) {
+ tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63);
+ }
+ }
+}
+
+static void gen_set_gpri(DisasContext *ctx, int reg_num, target_long imm)
+{
+ if (reg_num != 0) {
+ switch (get_ol(ctx)) {
+ case MXL_RV32:
+ tcg_gen_movi_tl(cpu_gpr[reg_num], (int32_t)imm);
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ tcg_gen_movi_tl(cpu_gpr[reg_num], imm);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (get_xl_max(ctx) == MXL_RV128) {
+ tcg_gen_movi_tl(cpu_gprh[reg_num], -(imm < 0));
+ }
+ }
+}
+
+static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
+{
+ assert(get_ol(ctx) == MXL_RV128);
+ if (reg_num != 0) {
+ tcg_gen_mov_tl(cpu_gpr[reg_num], rl);
+ tcg_gen_mov_tl(cpu_gprh[reg_num], rh);
}
}
@@ -443,10 +507,22 @@ EX_SH(12)
} \
} while (0)
-#define REQUIRE_64BIT(ctx) do { \
- if (get_xl(ctx) < MXL_RV64) { \
- return false; \
- } \
+#define REQUIRE_64BIT(ctx) do { \
+ if (get_xl(ctx) != MXL_RV64) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_128BIT(ctx) do { \
+ if (get_xl(ctx) != MXL_RV128) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_64_OR_128BIT(ctx) do { \
+ if (get_xl(ctx) == MXL_RV32) { \
+ return false; \
+ } \
} while (0)
static int ex_rvc_register(DisasContext *ctx, int reg)
@@ -463,62 +539,146 @@ static int ex_rvc_shifti(DisasContext *ctx, int imm)
/* Include the auto-generated decoder for 32 bit insn */
#include "decode-insn32.c.inc"
-static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
+static bool gen_logic_imm_fn(DisasContext *ctx, arg_i *a,
void (*func)(TCGv, TCGv, target_long))
{
TCGv dest = dest_gpr(ctx, a->rd);
- TCGv src1 = get_gpr(ctx, a->rs1, ext);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
func(dest, src1, a->imm);
- gen_set_gpr(ctx, a->rd, dest);
+ if (get_xl(ctx) == MXL_RV128) {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ func(desth, src1h, -(a->imm < 0));
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ } else {
+ gen_set_gpr(ctx, a->rd, dest);
+ }
+
+ return true;
+}
+
+static bool gen_logic(DisasContext *ctx, arg_r *a,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ func(dest, src1, src2);
+
+ if (get_xl(ctx) == MXL_RV128) {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ func(desth, src1h, src2h);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ } else {
+ gen_set_gpr(ctx, a->rd, dest);
+ }
+
+ return true;
+}
+
+static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, target_long),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+
+ if (get_ol(ctx) < MXL_RV128) {
+ func(dest, src1, a->imm);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ if (f128 == NULL) {
+ return false;
+ }
+
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ f128(dest, desth, src1, src1h, a->imm);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, TCGv))
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv src1 = get_gpr(ctx, a->rs1, ext);
TCGv src2 = tcg_constant_tl(a->imm);
- func(dest, src1, src2);
+ if (get_ol(ctx) < MXL_RV128) {
+ func(dest, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ if (f128 == NULL) {
+ return false;
+ }
- gen_set_gpr(ctx, a->rd, dest);
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = tcg_constant_tl(-(a->imm < 0));
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ f128(dest, desth, src1, src1h, src2, src2h);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, TCGv))
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv src1 = get_gpr(ctx, a->rs1, ext);
TCGv src2 = get_gpr(ctx, a->rs2, ext);
- func(dest, src1, src2);
+ if (get_ol(ctx) < MXL_RV128) {
+ func(dest, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ if (f128 == NULL) {
+ return false;
+ }
- gen_set_gpr(ctx, a->rd, dest);
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ f128(dest, desth, src1, src1h, src2, src2h);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
void (*f_tl)(TCGv, TCGv, TCGv),
- void (*f_32)(TCGv, TCGv, TCGv))
+ void (*f_32)(TCGv, TCGv, TCGv),
+ void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
int olen = get_olen(ctx);
if (olen != TARGET_LONG_BITS) {
if (olen == 32) {
f_tl = f_32;
- } else {
+ } else if (olen != 128) {
g_assert_not_reached();
}
}
- return gen_arith(ctx, a, ext, f_tl);
+ return gen_arith(ctx, a, ext, f_tl, f_128);
}
static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, target_long))
+ void (*func)(TCGv, TCGv, target_long),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
{
TCGv dest, src1;
int max_len = get_olen(ctx);
@@ -530,26 +690,38 @@ static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
dest = dest_gpr(ctx, a->rd);
src1 = get_gpr(ctx, a->rs1, ext);
- func(dest, src1, a->shamt);
+ if (max_len < 128) {
+ func(dest, src1, a->shamt);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
- gen_set_gpr(ctx, a->rd, dest);
+ if (f128 == NULL) {
+ return false;
+ }
+ f128(dest, desth, src1, src1h, a->shamt);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
DisasExtend ext,
void (*f_tl)(TCGv, TCGv, target_long),
- void (*f_32)(TCGv, TCGv, target_long))
+ void (*f_32)(TCGv, TCGv, target_long),
+ void (*f_128)(TCGv, TCGv, TCGv, TCGv,
+ target_long))
{
int olen = get_olen(ctx);
if (olen != TARGET_LONG_BITS) {
if (olen == 32) {
f_tl = f_32;
- } else {
+ } else if (olen != 128) {
g_assert_not_reached();
}
}
- return gen_shift_imm_fn(ctx, a, ext, f_tl);
+ return gen_shift_imm_fn(ctx, a, ext, f_tl, f_128);
}
static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
@@ -573,34 +745,49 @@ static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
}
static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, TCGv))
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv))
{
- TCGv dest = dest_gpr(ctx, a->rd);
- TCGv src1 = get_gpr(ctx, a->rs1, ext);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
TCGv ext2 = tcg_temp_new();
+ int max_len = get_olen(ctx);
- tcg_gen_andi_tl(ext2, src2, get_olen(ctx) - 1);
- func(dest, src1, ext2);
+ tcg_gen_andi_tl(ext2, src2, max_len - 1);
- gen_set_gpr(ctx, a->rd, dest);
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+
+ if (max_len < 128) {
+ func(dest, src1, ext2);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ if (f128 == NULL) {
+ return false;
+ }
+ f128(dest, desth, src1, src1h, ext2);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
tcg_temp_free(ext2);
return true;
}
static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
void (*f_tl)(TCGv, TCGv, TCGv),
- void (*f_32)(TCGv, TCGv, TCGv))
+ void (*f_32)(TCGv, TCGv, TCGv),
+ void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv))
{
int olen = get_olen(ctx);
if (olen != TARGET_LONG_BITS) {
if (olen == 32) {
f_tl = f_32;
- } else {
+ } else if (olen != 128) {
g_assert_not_reached();
}
}
- return gen_shift(ctx, a, ext, f_tl);
+ return gen_shift(ctx, a, ext, f_tl, f_128);
}
static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
@@ -662,6 +849,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
if (!has_ext(ctx, RVC)) {
gen_exception_illegal(ctx);
} else {
+ ctx->opcode = opcode;
ctx->pc_succ_insn = ctx->base.pc_next + 2;
if (!decode_insn16(ctx, opcode)) {
gen_exception_illegal(ctx);
@@ -672,6 +860,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
opcode32 = deposit32(opcode32, 16, 16,
translator_lduw(env, &ctx->base,
ctx->base.pc_next + 2));
+ ctx->opcode = opcode32;
ctx->pc_succ_insn = ctx->base.pc_next + 4;
if (!decode_insn32(ctx, opcode32)) {
gen_exception_illegal(ctx);
@@ -715,6 +904,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
ctx->vstart = env->vstart;
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
+ ctx->misa_mxl_max = env->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->cs = cs;
ctx->ntemp = 0;
@@ -819,10 +1009,13 @@ void riscv_translate_init(void)
* unless you specifically block reads/writes to reg 0.
*/
cpu_gpr[0] = NULL;
+ cpu_gprh[0] = NULL;
for (i = 1; i < 32; i++) {
cpu_gpr[i] = tcg_global_mem_new(cpu_env,
offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
+ cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]);
}
for (i = 0; i < 32; i++) {
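A note on the register-file convention introduced in translate.c above: when the CPU's maximum MXL is RV128 but an operation runs at a narrower length, gen_set_gpr keeps the upper half of the pair as the sign extension of the 64-bit result (the sari by 63), so later 128-bit consumers always see a canonical value. A standalone host-C sketch of that invariant (illustrative names, not QEMU API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One 128-bit GPR as modelled by the translator: a low/high pair. */
typedef struct { uint64_t lo, hi; } gpr128;

/* Write a 64-bit result while preserving the rv128 canonical form:
 * the high half mirrors the sign bit of the low half (cf. sari ..., 63). */
static void set_gpr64(gpr128 *r, int64_t val)
{
    r->lo = (uint64_t)val;
    r->hi = val < 0 ? UINT64_MAX : 0;
}

int main(void)
{
    gpr128 r;

    set_gpr64(&r, 5);
    assert(r.lo == 5 && r.hi == 0);

    set_gpr64(&r, -5);
    assert(r.lo == (uint64_t)-5 && r.hi == UINT64_MAX);

    puts("upper halves track the sign of the 64-bit result");
    return 0;
}

gen_set_gpri applies the same rule for immediates by writing -(imm < 0) into the high half.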
diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.def
index 3e5594210c..f0af458aee 100644
--- a/target/s390x/tcg/insn-data.def
+++ b/target/s390x/tcg/insn-data.def
@@ -45,7 +45,7 @@
D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL)
C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32)
C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64)
- D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ)
+ D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEUQ)
C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64)
/* ADD IMMEDIATE HIGH */
C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32)
@@ -76,7 +76,7 @@
/* ADD LOGICAL WITH SIGNED IMMEDIATE */
D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL)
C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32)
- D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEQ)
+ D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEUQ)
C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32)
@@ -269,10 +269,10 @@
/* COMPARE AND SWAP */
D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
- D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ)
+ D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEUQ)
/* COMPARE DOUBLE AND SWAP */
- D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
- D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
+ D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEUQ)
+ D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEUQ)
C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0)
/* COMPARE AND SWAP AND STORE */
C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0)
@@ -436,19 +436,19 @@
C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0)
/* LOAD AND ADD */
D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL)
- D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ)
+ D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEUQ)
/* LOAD AND ADD LOGICAL */
D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL)
- D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ)
+ D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEUQ)
/* LOAD AND AND */
D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL)
- D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ)
+ D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEUQ)
/* LOAD AND EXCLUSIVE OR */
D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL)
- D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ)
+ D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEUQ)
/* LOAD AND OR */
D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL)
- D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ)
+ D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEUQ)
/* LOAD AND TEST */
C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
@@ -565,7 +565,7 @@
C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0)
/* LOAD PAIR DISJOINT */
D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL)
- D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ)
+ D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEUQ)
/* LOAD PAIR FROM QUADWORD */
C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0)
/* LOAD POSITIVE */
@@ -1279,7 +1279,7 @@
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV)
- E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV)
+ E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEUQ, IF_PRIV)
/* DIAGNOSE (KVM hypercall) */
F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV | IF_IO)
/* INSERT STORAGE KEY EXTENDED */
@@ -1303,7 +1303,7 @@
F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV)
/* LOAD USING REAL ADDRESS */
E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV)
- E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEQ, IF_PRIV)
+ E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEUQ, IF_PRIV)
/* MOVE TO PRIMARY */
F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV)
/* MOVE TO SECONDARY */
@@ -1357,7 +1357,7 @@
F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV)
/* STORE USING REAL ADDRESS */
E(0xb246, STURA, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUL, IF_PRIV)
- E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEQ, IF_PRIV)
+ E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUQ, IF_PRIV)
/* TEST BLOCK */
F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV)
/* TEST PROTECTION */
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index 362a30d99e..406578d105 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -1895,7 +1895,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
if (parallel) {
#ifdef CONFIG_ATOMIC64
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mem_idx);
ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
#else
/* Note that we asserted !parallel above. */
@@ -1970,7 +1970,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a2 + 0, svh, ra);
cpu_stq_data_ra(env, a2 + 8, svl, ra);
} else if (HAVE_ATOMIC128) {
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh);
cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
} else {
@@ -2494,7 +2494,7 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
assert(HAVE_ATOMIC128);
mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
hi = int128_gethi(v);
lo = int128_getlo(v);
@@ -2525,7 +2525,7 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
assert(HAVE_ATOMIC128);
mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
v = int128_make128(low, high);
cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index dcc249a197..f180853e7a 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -3063,7 +3063,7 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
t1 = tcg_temp_new_i64();
t2 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
- MO_TEQ | MO_ALIGN_8);
+ MO_TEUQ | MO_ALIGN_8);
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
gen_helper_load_psw(cpu_env, t1, t2);
@@ -4295,7 +4295,7 @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
gen_helper_sck(cc_op, cpu_env, o->in1);
set_cc_static(s);
return DISAS_NEXT;
@@ -5521,7 +5521,7 @@ static void wout_m1_64(DisasContext *s, DisasOps *o)
#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif
@@ -5997,7 +5997,7 @@ static void in2_m2_64w(DisasContext *s, DisasOps *o)
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index 28bf5a23b6..98eb7710a4 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -424,9 +424,9 @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
tcg_temp_free(t0);
@@ -592,16 +592,16 @@ static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
- tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEUQ);
for (;; v1++) {
- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t1, v1, 0, ES_64);
if (v1 == v3) {
break;
}
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t1, v1, 1, ES_64);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
}
@@ -950,10 +950,10 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -993,10 +993,10 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
for (;; v1++) {
read_vec_element_i64(tmp, v1, 0, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, v1, 1, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
if (v1 == v3) {
break;
}
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 4cfb109f56..fb9dd9db2f 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -83,6 +83,7 @@
#define DELAY_SLOT_RTE (1 << 2)
#define TB_FLAG_PENDING_MOVCA (1 << 3)
+#define TB_FLAG_UNALIGN (1 << 4)
#define GUSA_SHIFT 4
#ifdef CONFIG_USER_ONLY
@@ -373,6 +374,9 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
| (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
| (env->sr & (1u << SR_FD)) /* Bit 15 */
| (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
+#ifdef CONFIG_USER_ONLY
+ *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
+#endif
}
#endif /* SH4_CPU_H */
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index ce5d674a52..43bc88b7b3 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -50,8 +50,10 @@ typedef struct DisasContext {
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
+#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
+#define UNALIGN(C) 0
#endif
/* Target-specific values for ctx->base.is_jmp. */
@@ -495,7 +497,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -503,7 +506,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -558,19 +562,23 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
return;
case 0x2001: /* mov.w Rm,@Rn */
- tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
return;
case 0x2002: /* mov.l Rm,@Rn */
- tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
return;
case 0x6000: /* mov.b @Rm,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
return;
case 0x6001: /* mov.w @Rm,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
return;
case 0x6002: /* mov.l @Rm,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
return;
case 0x2004: /* mov.b Rm,@-Rn */
{
@@ -586,7 +594,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 2);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
@@ -595,7 +604,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
@@ -606,12 +616,14 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
return;
case 0x6005: /* mov.w @Rm+,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
return;
case 0x6006: /* mov.l @Rm+,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
return;
@@ -627,7 +639,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -635,7 +648,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -651,7 +665,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -659,7 +674,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -994,7 +1010,7 @@ static void _decode_opc(DisasContext * ctx)
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
- tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ);
tcg_temp_free_i64(fp);
} else {
tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
@@ -1004,7 +1020,7 @@ static void _decode_opc(DisasContext * ctx)
CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_temp_free_i64(fp);
} else {
@@ -1015,7 +1031,7 @@ static void _decode_opc(DisasContext * ctx)
CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_temp_free_i64(fp);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
@@ -1032,7 +1048,7 @@ static void _decode_opc(DisasContext * ctx)
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
tcg_gen_subi_i32(addr, REG(B11_8), 8);
- tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
tcg_temp_free_i64(fp);
} else {
tcg_gen_subi_i32(addr, REG(B11_8), 4);
@@ -1049,7 +1065,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_temp_free_i64(fp);
} else {
@@ -1066,7 +1082,7 @@ static void _decode_opc(DisasContext * ctx)
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
- tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
tcg_temp_free_i64(fp);
} else {
tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
@@ -1253,7 +1269,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
@@ -1269,7 +1286,8 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
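The new TB_FLAG_UNALIGN bit and the UNALIGN() macro wire the linux-user prctl(PR_SET_UNALIGN) state into SH4 translation: when the guest has not asked for SIGBUS on misaligned accesses, each data access is emitted with MO_UNALN, otherwise MO_ALIGN keeps the fault-on-misalignment behaviour. A hedged sketch of the same selection written as a helper function; sh4_unalign_memop is hypothetical, the patch itself uses the UNALIGN(ctx) macro:

/*
 * Assumes cs->prctl_unalign_sigbus is maintained by the linux-user
 * PR_SET_UNALIGN handling and copied into the TB flags as shown in
 * cpu_get_tb_cpu_state() above.
 */
static MemOp sh4_unalign_memop(const DisasContext *ctx)
{
#ifdef CONFIG_USER_ONLY
    return (ctx->tbflags & TB_FLAG_UNALIGN) ? MO_UNALN : MO_ALIGN;
#else
    return 0;   /* softmmu: alignment handling is unchanged */
#endif
}

/* e.g. the mov.l @Rm,Rn case would then read: */
/* tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                       MO_TESL | sh4_unalign_memop(ctx)); */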
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index fdb8bbe5dc..4c7c7b5347 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -2464,7 +2464,7 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
int insn, int size, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
TCGv_i32 d32;
TCGv_i64 d64;
@@ -2578,7 +2578,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
static void gen_stf_asi(DisasContext *dc, TCGv addr,
int insn, int size, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
TCGv_i32 d32;
switch (da.type) {
@@ -2660,7 +2660,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv_i64 hi = gen_dest_gpr(dc, rd);
TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
@@ -2727,7 +2727,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv lo = gen_load_gpr(dc, rd + 1);
switch (da.type) {
@@ -2787,7 +2787,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv oldv;
switch (da.type) {
@@ -2817,7 +2817,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
TCGv lo = gen_dest_gpr(dc, rd | 1);
TCGv hi = gen_dest_gpr(dc, rd);
TCGv_i64 t64 = tcg_temp_new_i64();
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
switch (da.type) {
case GET_ASI_EXCP:
@@ -2830,7 +2830,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
default:
{
TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+ TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
save_state(dc);
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
@@ -2849,7 +2849,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv lo = gen_load_gpr(dc, rd + 1);
TCGv_i64 t64 = tcg_temp_new_i64();
@@ -2886,7 +2886,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
default:
{
TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+ TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
save_state(dc);
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
@@ -5479,7 +5479,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
break;
case 0x1b: /* V9 ldxa */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
break;
case 0x2d: /* V9 prefetch, no effect */
goto skip_move;
@@ -5533,7 +5533,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
if (rd == 1) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEQ);
+ dc->mem_idx, MO_TEUQ);
gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
tcg_temp_free_i64(t64);
break;
@@ -5549,11 +5549,11 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
cpu_src2_64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
tcg_temp_free_i64(cpu_src1_64);
tcg_temp_free_i64(cpu_src2_64);
@@ -5562,7 +5562,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
default:
@@ -5623,7 +5623,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1e: /* V9 stxa */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
break;
#endif
default:
@@ -5664,11 +5664,11 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
before performing the first write. */
cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEQ | MO_ALIGN_16);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEQ);
+ dc->mem_idx, MO_TEUQ);
break;
#else /* !TARGET_SPARC64 */
/* stdfq, store floating point queue */
@@ -5687,7 +5687,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = gen_load_fpr_D(dc, rd);
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
break;
default:
goto illegal_insn;
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 07084407cb..417edbd3f0 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -246,7 +246,7 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
TCGv_i64 temp = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp, rl, rh);
- tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ);
tcg_temp_free_i64(temp);
}
@@ -264,7 +264,7 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
{
TCGv_i64 temp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEUQ);
/* write back to two 32 bit regs */
tcg_gen_extr_i64_i32(rl, rh, temp);
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 09430c1bf9..b1491ed625 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -7077,7 +7077,7 @@ static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[],
} else {
addr = arg[1].in;
}
- mop = gen_load_store_alignment(dc, MO_TEQ, addr);
+ mop = gen_load_store_alignment(dc, MO_TEUQ, addr);
if (par[0]) {
tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
} else {
@@ -7142,7 +7142,7 @@ static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[],
} else {
addr = arg[1].in;
}
- mop = gen_load_store_alignment(dc, MO_TEQ, addr);
+ mop = gen_load_store_alignment(dc, MO_TEUQ, addr);
if (par[0]) {
tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
} else {
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 5edca8d44d..a8db553287 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1744,7 +1744,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
case MO_SL:
tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
break;
default:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 9d322cdba6..5345c4e39c 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1443,13 +1443,13 @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
#ifdef HOST_WORDS_BIGENDIAN
[MO_UW] = helper_be_lduw_mmu,
[MO_UL] = helper_be_ldul_mmu,
- [MO_Q] = helper_be_ldq_mmu,
+ [MO_UQ] = helper_be_ldq_mmu,
[MO_SW] = helper_be_ldsw_mmu,
[MO_SL] = helper_be_ldul_mmu,
#else
[MO_UW] = helper_le_lduw_mmu,
[MO_UL] = helper_le_ldul_mmu,
- [MO_Q] = helper_le_ldq_mmu,
+ [MO_UQ] = helper_le_ldq_mmu,
[MO_SW] = helper_le_ldsw_mmu,
[MO_SL] = helper_le_ldul_mmu,
#endif
@@ -1694,7 +1694,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
default:
tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
break;
- case MO_Q:
+ case MO_UQ:
if (datalo != TCG_REG_R1) {
tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
@@ -1781,7 +1781,7 @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
case MO_UL:
tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
break;
- case MO_Q:
+ case MO_UQ:
/* Avoid ldrd for user-only emulation, to handle unaligned. */
if (USING_SOFTMMU && use_armv6_instructions
&& (datalo & 1) == 0 && datahi == datalo + 1) {
@@ -1824,7 +1824,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
case MO_UL:
tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
break;
- case MO_Q:
+ case MO_UQ:
/* Avoid ldrd for user-only emulation, to handle unaligned. */
if (USING_SOFTMMU && use_armv6_instructions
&& (datalo & 1) == 0 && datahi == datalo + 1) {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 84b109bb84..875311f795 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1615,10 +1615,10 @@ static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_ldub_mmu,
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
@@ -1628,10 +1628,10 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
/* Perform the TLB load and compare.
@@ -1827,7 +1827,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
case MO_UL:
tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
break;
- case MO_Q:
+ case MO_UQ:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
} else if (data_reg == TCG_REG_EDX) {
@@ -2019,7 +2019,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
break;
#endif
- case MO_Q:
+ case MO_UQ:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
base, index, 0, ofs);
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index d8f6914f03..27b020e66c 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -1023,11 +1023,11 @@ static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LESW] = helper_le_ldsw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
[MO_LESL] = helper_le_ldsl_mmu,
[MO_BESL] = helper_be_ldsl_mmu,
@@ -1038,10 +1038,10 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
@@ -1384,7 +1384,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
case MO_SL:
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
break;
- case MO_Q | MO_BSWAP:
+ case MO_UQ | MO_BSWAP:
if (TCG_TARGET_REG_BITS == 64) {
if (use_mips32r2_instructions) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
@@ -1413,7 +1413,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
}
break;
- case MO_Q:
+ case MO_UQ:
/* Prefer to load from offset 0 first, but allow for overlap. */
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 2397f2cf93..e573000951 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -308,13 +308,13 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
CASE_OP_32_64(mul):
return x * y;
- CASE_OP_32_64(and):
+ CASE_OP_32_64_VEC(and):
return x & y;
- CASE_OP_32_64(or):
+ CASE_OP_32_64_VEC(or):
return x | y;
- CASE_OP_32_64(xor):
+ CASE_OP_32_64_VEC(xor):
return x ^ y;
case INDEX_op_shl_i32:
@@ -347,16 +347,16 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
case INDEX_op_rotl_i64:
return rol64(x, y & 63);
- CASE_OP_32_64(not):
+ CASE_OP_32_64_VEC(not):
return ~x;
CASE_OP_32_64(neg):
return -x;
- CASE_OP_32_64(andc):
+ CASE_OP_32_64_VEC(andc):
return x & ~y;
- CASE_OP_32_64(orc):
+ CASE_OP_32_64_VEC(orc):
return x | ~y;
CASE_OP_32_64(eqv):
@@ -751,6 +751,12 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_commutative(OptContext *ctx, TCGOp *op)
+{
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+ return false;
+}
+
static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
swap_commutative(op->args[0], &op->args[1], &op->args[2]);
@@ -905,6 +911,16 @@ static bool fold_add(OptContext *ctx, TCGOp *op)
return false;
}
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_add_vec(OptContext *ctx, TCGOp *op)
+{
+ if (fold_commutative(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
+}
+
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
@@ -1938,10 +1954,10 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_sub(OptContext *ctx, TCGOp *op)
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0) ||
+ if (fold_xx_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0) ||
fold_sub_to_neg(ctx, op)) {
return true;
@@ -1949,6 +1965,11 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
return false;
}
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
+}
+
static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
return fold_addsub2(ctx, op, false);
@@ -2052,9 +2073,12 @@ void tcg_optimize(TCGContext *s)
* Sorted alphabetically by opcode as much as possible.
*/
switch (opc) {
- CASE_OP_32_64_VEC(add):
+ CASE_OP_32_64(add):
done = fold_add(&ctx, op);
break;
+ case INDEX_op_add_vec:
+ done = fold_add_vec(&ctx, op);
+ break;
CASE_OP_32_64(add2):
done = fold_add2(&ctx, op);
break;
@@ -2193,9 +2217,12 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(sextract):
done = fold_sextract(&ctx, op);
break;
- CASE_OP_32_64_VEC(sub):
+ CASE_OP_32_64(sub):
done = fold_sub(&ctx, op);
break;
+ case INDEX_op_sub_vec:
+ done = fold_sub_vec(&ctx, op);
+ break;
CASE_OP_32_64(sub2):
done = fold_sub2(&ctx, op);
break;
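The optimize.c hunks split the add/sub folding because do_constant_folding() can only evaluate scalar (i32/i64) constants, while algebraic identities such as x + 0 == x or x - x == 0 remain valid for vectors. A standalone illustration of that split, not QEMU code, assuming a simplified argument representation:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    bool is_const;
    uint64_t val;
} FoldArg;

/* scalar-only path: evaluate the operation when both inputs are constant */
static bool fold_sub_const(FoldArg *d, FoldArg a, FoldArg b)
{
    if (a.is_const && b.is_const) {
        d->is_const = true;
        d->val = a.val - b.val;       /* the do_constant_folding() step */
        return true;
    }
    return false;
}

/* algebraic path, shared by scalars and vectors */
static bool fold_sub_algebraic(FoldArg *d, FoldArg a, FoldArg b, bool same_arg)
{
    if (same_arg) {                   /* x - x -> 0 */
        d->is_const = true;
        d->val = 0;
        return true;
    }
    if (b.is_const && b.val == 0) {   /* x - 0 -> x */
        *d = a;
        return true;
    }
    return false;
}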
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 3e4ca2be88..9e79a7edee 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -1935,24 +1935,24 @@ static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
[MO_UB] = LBZX,
[MO_UW] = LHZX,
[MO_UL] = LWZX,
- [MO_Q] = LDX,
+ [MO_UQ] = LDX,
[MO_SW] = LHAX,
[MO_SL] = LWAX,
[MO_BSWAP | MO_UB] = LBZX,
[MO_BSWAP | MO_UW] = LHBRX,
[MO_BSWAP | MO_UL] = LWBRX,
- [MO_BSWAP | MO_Q] = LDBRX,
+ [MO_BSWAP | MO_UQ] = LDBRX,
};
static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
[MO_UB] = STBX,
[MO_UW] = STHX,
[MO_UL] = STWX,
- [MO_Q] = STDX,
+ [MO_UQ] = STDX,
[MO_BSWAP | MO_UB] = STBX,
[MO_BSWAP | MO_UW] = STHBRX,
[MO_BSWAP | MO_UL] = STWBRX,
- [MO_BSWAP | MO_Q] = STDBRX,
+ [MO_BSWAP | MO_UQ] = STDBRX,
};
static const uint32_t qemu_exts_opc[4] = {
@@ -1969,10 +1969,10 @@ static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_ldub_mmu,
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
@@ -1982,10 +1982,10 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
/* We expect to use a 16-bit negative offset from ENV. */
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 9b13a46fb4..e9488f7093 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -862,7 +862,7 @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
#if TCG_TARGET_REG_BITS == 64
[MO_SL] = helper_be_ldsl_mmu,
#endif
- [MO_Q] = helper_be_ldq_mmu,
+ [MO_UQ] = helper_be_ldq_mmu,
#else
[MO_UW] = helper_le_lduw_mmu,
[MO_SW] = helper_le_ldsw_mmu,
@@ -870,7 +870,7 @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
#if TCG_TARGET_REG_BITS == 64
[MO_SL] = helper_le_ldsl_mmu,
#endif
- [MO_Q] = helper_le_ldq_mmu,
+ [MO_UQ] = helper_le_ldq_mmu,
#endif
};
@@ -1083,7 +1083,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
case MO_SL:
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
break;
- case MO_Q:
+ case MO_UQ:
/* Prefer to load from offset 0 first, but allow for overlap. */
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 57e803e339..b12fbfda63 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -438,22 +438,22 @@ static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_LESW] = helper_le_ldsw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
[MO_LESL] = helper_le_ldsl_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
[MO_BESL] = helper_be_ldsl_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
#endif
@@ -1745,10 +1745,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
tcg_out_insn(s, RXY, LGF, data, base, index, disp);
break;
- case MO_Q | MO_BSWAP:
+ case MO_UQ | MO_BSWAP:
tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_insn(s, RXY, LG, data, base, index, disp);
break;
@@ -1791,10 +1791,10 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
}
break;
- case MO_Q | MO_BSWAP:
+ case MO_UQ | MO_BSWAP:
tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_insn(s, RXY, STG, data, base, index, disp);
break;
@@ -1928,7 +1928,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
case MO_UL:
tgen_ext32u(s, TCG_REG_R4, data_reg);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
break;
default:
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
index 9dd32ef95e..0c062c60eb 100644
--- a/tcg/sparc/tcg-target.c.inc
+++ b/tcg/sparc/tcg-target.c.inc
@@ -889,20 +889,20 @@ static void build_trampolines(TCGContext *s)
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LESW] = helper_le_ldsw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
static void * const qemu_st_helpers[] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
int i;
@@ -1126,13 +1126,13 @@ static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_BESW] = LDSH,
[MO_BEUL] = LDUW,
[MO_BESL] = LDSW,
- [MO_BEQ] = LDX,
+ [MO_BEUQ] = LDX,
[MO_LEUW] = LDUH_LE,
[MO_LESW] = LDSH_LE,
[MO_LEUL] = LDUW_LE,
[MO_LESL] = LDSW_LE,
- [MO_LEQ] = LDX_LE,
+ [MO_LEUQ] = LDX_LE,
};
static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
@@ -1140,11 +1140,11 @@ static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_BEUW] = STH,
[MO_BEUL] = STW,
- [MO_BEQ] = STX,
+ [MO_BEUQ] = STX,
[MO_LEUW] = STH_LE,
[MO_LEUL] = STW_LE,
- [MO_LEQ] = STX_LE,
+ [MO_LEUQ] = STX_LE,
};
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 934aa8510b..5d2f0d8b10 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1751,12 +1751,12 @@ static const char * const ldst_name[] =
[MO_LESW] = "lesw",
[MO_LEUL] = "leul",
[MO_LESL] = "lesl",
- [MO_LEQ] = "leq",
+ [MO_LEUQ] = "leq",
[MO_BEUW] = "beuw",
[MO_BESW] = "besw",
[MO_BEUL] = "beul",
[MO_BESL] = "besl",
- [MO_BEQ] = "beq",
+ [MO_BEUQ] = "beq",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
diff --git a/tcg/tci.c b/tcg/tci.c
index e76087ccac..336af5945a 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -309,7 +309,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
return helper_le_ldul_mmu(env, taddr, oi, ra);
case MO_LESL:
return helper_le_ldsl_mmu(env, taddr, oi, ra);
- case MO_LEQ:
+ case MO_LEUQ:
return helper_le_ldq_mmu(env, taddr, oi, ra);
case MO_BEUW:
return helper_be_lduw_mmu(env, taddr, oi, ra);
@@ -319,7 +319,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
return helper_be_ldul_mmu(env, taddr, oi, ra);
case MO_BESL:
return helper_be_ldsl_mmu(env, taddr, oi, ra);
- case MO_BEQ:
+ case MO_BEUQ:
return helper_be_ldq_mmu(env, taddr, oi, ra);
default:
g_assert_not_reached();
@@ -348,7 +348,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
case MO_LESL:
ret = (int32_t)ldl_le_p(haddr);
break;
- case MO_LEQ:
+ case MO_LEUQ:
ret = ldq_le_p(haddr);
break;
case MO_BEUW:
@@ -363,7 +363,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
case MO_BESL:
ret = (int32_t)ldl_be_p(haddr);
break;
- case MO_BEQ:
+ case MO_BEUQ:
ret = ldq_be_p(haddr);
break;
default:
@@ -391,7 +391,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
case MO_LEUL:
helper_le_stl_mmu(env, taddr, val, oi, ra);
break;
- case MO_LEQ:
+ case MO_LEUQ:
helper_le_stq_mmu(env, taddr, val, oi, ra);
break;
case MO_BEUW:
@@ -400,7 +400,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
case MO_BEUL:
helper_be_stl_mmu(env, taddr, val, oi, ra);
break;
- case MO_BEQ:
+ case MO_BEUQ:
helper_be_stq_mmu(env, taddr, val, oi, ra);
break;
default:
@@ -420,7 +420,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
case MO_LEUL:
stl_le_p(haddr, val);
break;
- case MO_LEQ:
+ case MO_LEUQ:
stq_le_p(haddr, val);
break;
case MO_BEUW:
@@ -429,7 +429,7 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
case MO_BEUL:
stl_be_p(haddr, val);
break;
- case MO_BEQ:
+ case MO_BEUQ:
stq_be_p(haddr, val);
break;
default:
diff --git a/tests/data/acpi/q35/DSDT.tis.tpm12 b/tests/data/acpi/q35/DSDT.tis.tpm12
index 0ebdf6fbd7..fb9dd1f059 100644
--- a/tests/data/acpi/q35/DSDT.tis.tpm12
+++ b/tests/data/acpi/q35/DSDT.tis.tpm12
Binary files differ
diff --git a/tests/data/acpi/q35/DSDT.tis.tpm2 b/tests/data/acpi/q35/DSDT.tis.tpm2
index dcbb7f0af3..00d732e46f 100644
--- a/tests/data/acpi/q35/DSDT.tis.tpm2
+++ b/tests/data/acpi/q35/DSDT.tis.tpm2
Binary files differ
diff --git a/tests/data/acpi/q35/FACP.slic b/tests/data/acpi/q35/FACP.slic
new file mode 100644
index 0000000000..891fd4b784
--- /dev/null
+++ b/tests/data/acpi/q35/FACP.slic
Binary files differ
diff --git a/tests/data/acpi/q35/SLIC.slic b/tests/data/acpi/q35/SLIC.slic
new file mode 100644
index 0000000000..fd26592e24
--- /dev/null
+++ b/tests/data/acpi/q35/SLIC.slic
Binary files differ
diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c
index 9a468e29eb..e6b72d9026 100644
--- a/tests/qtest/bios-tables-test.c
+++ b/tests/qtest/bios-tables-test.c
@@ -1502,6 +1502,20 @@ static void test_acpi_virt_viot(void)
free_test_data(&data);
}
+static void test_acpi_q35_slic(void)
+{
+ test_data data = {
+ .machine = MACHINE_Q35,
+ .variant = ".slic",
+ };
+
+ test_acpi_one("-acpitable sig=SLIC,oem_id='CRASH ',oem_table_id='ME',"
+ "oem_rev=00002210,asl_compiler_id='qemu',"
+ "asl_compiler_rev=00000000,data=/dev/null",
+ &data);
+ free_test_data(&data);
+}
+
static void test_oem_fields(test_data *data)
{
int i;
@@ -1677,6 +1691,7 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar);
}
qtest_add_func("acpi/q35/viot", test_acpi_q35_viot);
+ qtest_add_func("acpi/q35/slic", test_acpi_q35_slic);
} else if (strcmp(arch, "aarch64") == 0) {
if (has_tcg) {
qtest_add_func("acpi/virt", test_acpi_virt_tcg);
diff --git a/tests/qtest/hd-geo-test.c b/tests/qtest/hd-geo-test.c
index 113126ae06..771eaa741b 100644
--- a/tests/qtest/hd-geo-test.c
+++ b/tests/qtest/hd-geo-test.c
@@ -960,9 +960,11 @@ int main(int argc, char **argv)
qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst);
if (have_qemu_img()) {
qtest_add_func("hd-geo/override/ide", test_override_ide);
- qtest_add_func("hd-geo/override/scsi", test_override_scsi);
- qtest_add_func("hd-geo/override/scsi_2_controllers",
- test_override_scsi_2_controllers);
+ if (qtest_has_device("lsi53c895a")) {
+ qtest_add_func("hd-geo/override/scsi", test_override_scsi);
+ qtest_add_func("hd-geo/override/scsi_2_controllers",
+ test_override_scsi_2_controllers);
+ }
qtest_add_func("hd-geo/override/virtio_blk", test_override_virtio_blk);
qtest_add_func("hd-geo/override/zero_chs", test_override_zero_chs);
qtest_add_func("hd-geo/override/scsi_hot_unplug",
diff --git a/tests/qtest/test-x86-cpuid-compat.c b/tests/qtest/test-x86-cpuid-compat.c
index f28848e06e..39138db774 100644
--- a/tests/qtest/test-x86-cpuid-compat.c
+++ b/tests/qtest/test-x86-cpuid-compat.c
@@ -302,54 +302,65 @@ int main(int argc, char **argv)
/* Check compatibility of old machine-types that didn't
* auto-increase level/xlevel/xlevel2: */
-
- add_cpuid_test("x86/cpuid/auto-level/pc-2.7",
- "-machine pc-i440fx-2.7 -cpu 486,arat=on,avx512vbmi=on,xsaveopt=on",
- "level", 1);
- add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7",
- "-machine pc-i440fx-2.7 -cpu 486,3dnow=on,sse4a=on,invtsc=on,npt=on,svm=on",
- "xlevel", 0);
- add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7",
- "-machine pc-i440fx-2.7 -cpu 486,xstore=on",
- "xlevel2", 0);
+ if (qtest_has_machine("pc-i440fx-2.7")) {
+ add_cpuid_test("x86/cpuid/auto-level/pc-2.7",
+ "-machine pc-i440fx-2.7 -cpu 486,arat=on,avx512vbmi=on,xsaveopt=on",
+ "level", 1);
+ add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7",
+ "-machine pc-i440fx-2.7 -cpu 486,3dnow=on,sse4a=on,invtsc=on,npt=on,svm=on",
+ "xlevel", 0);
+ add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7",
+ "-machine pc-i440fx-2.7 -cpu 486,xstore=on",
+ "xlevel2", 0);
+ }
/*
* QEMU 1.4.0 had auto-level enabled for CPUID[7], already,
* and the compat code that sets default level shouldn't
* disable the auto-level=7 code:
*/
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
- "-machine pc-i440fx-1.4 -cpu Nehalem",
- "level", 2);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
- "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on",
- "level", 7);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
- "-machine pc-i440fx-2.3 -cpu Penryn",
- "level", 4);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on",
- "-machine pc-i440fx-2.3 -cpu Penryn,erms=on",
- "level", 7);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off",
- "-machine pc-i440fx-2.9 -cpu Conroe",
- "level", 10);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on",
- "-machine pc-i440fx-2.9 -cpu Conroe,erms=on",
- "level", 10);
+ if (qtest_has_machine("pc-i440fx-1.4")) {
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
+ "-machine pc-i440fx-1.4 -cpu Nehalem",
+ "level", 2);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
+ "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on",
+ "level", 7);
+ }
+ if (qtest_has_machine("pc-i440fx-2.3")) {
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
+ "-machine pc-i440fx-2.3 -cpu Penryn",
+ "level", 4);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on",
+ "-machine pc-i440fx-2.3 -cpu Penryn,erms=on",
+ "level", 7);
+ }
+ if (qtest_has_machine("pc-i440fx-2.9")) {
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off",
+ "-machine pc-i440fx-2.9 -cpu Conroe",
+ "level", 10);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on",
+ "-machine pc-i440fx-2.9 -cpu Conroe,erms=on",
+ "level", 10);
+ }
/*
* xlevel doesn't have any feature that triggers auto-level
* code on old machine-types. Just check that the compat code
* is working correctly:
*/
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3",
- "-machine pc-i440fx-2.3 -cpu SandyBridge",
- "xlevel", 0x8000000a);
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off",
- "-machine pc-i440fx-2.4 -cpu SandyBridge,",
- "xlevel", 0x80000008);
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
- "-machine pc-i440fx-2.4 -cpu SandyBridge,svm=on,npt=on",
- "xlevel", 0x80000008);
+ if (qtest_has_machine("pc-i440fx-2.3")) {
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3",
+ "-machine pc-i440fx-2.3 -cpu SandyBridge",
+ "xlevel", 0x8000000a);
+ }
+ if (qtest_has_machine("pc-i440fx-2.4")) {
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off",
+ "-machine pc-i440fx-2.4 -cpu SandyBridge,",
+ "xlevel", 0x80000008);
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
+ "-machine pc-i440fx-2.4 -cpu SandyBridge,svm=on,npt=on",
+ "xlevel", 0x80000008);
+ }
/* Test feature parsing */
add_feature_test("x86/cpuid/features/plus",
diff --git a/tests/unit/test-util-sockets.c b/tests/unit/test-util-sockets.c
index 72b9246529..896247e3ed 100644
--- a/tests/unit/test-util-sockets.c
+++ b/tests/unit/test-util-sockets.c
@@ -305,9 +305,11 @@ static void test_socket_unix_abstract(void)
};
int i;
+ i = g_file_open_tmp("unix-XXXXXX", &addr.u.q_unix.path, NULL);
+ g_assert_true(i >= 0);
+ close(i);
+
addr.type = SOCKET_ADDRESS_TYPE_UNIX;
- addr.u.q_unix.path = g_strdup_printf("unix-%d-%u",
- getpid(), g_random_int());
addr.u.q_unix.has_abstract = true;
addr.u.q_unix.abstract = true;
addr.u.q_unix.has_tight = false;
diff --git a/util/int128.c b/util/int128.c
new file mode 100644
index 0000000000..ed8f25fef1
--- /dev/null
+++ b/util/int128.c
@@ -0,0 +1,147 @@
+/*
+ * 128-bit division and remainder for compilers not supporting __int128
+ *
+ * Copyright (c) 2021 Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/int128.h"
+
+#ifndef CONFIG_INT128
+
+/*
+ * Division and remainder algorithms for 128-bit due to Stefan Kanthak,
+ * https://skanthak.homepage.t-online.de/integer.html#udivmodti4
+ * Preconditions:
+ *   - function should never be called with v equal to 0; it has to
+ *     be dealt with beforehand
+ *   - quotient pointer must be valid
+ */
+static Int128 divrem128(Int128 u, Int128 v, Int128 *q)
+{
+ Int128 qq;
+ uint64_t hi, lo, tmp;
+ int s = clz64(v.hi);
+
+ if (s == 64) {
+        /* the high 64 bits of v are zero, so divu128 can do the division directly */
+ hi = u.hi;
+ lo = u.lo;
+ tmp = divu128(&lo, &hi, v.lo);
+ *q = int128_make128(lo, hi);
+ return int128_make128(tmp, 0);
+ } else {
+ hi = int128_gethi(int128_lshift(v, s));
+
+ if (hi > u.hi) {
+ lo = u.lo;
+ tmp = u.hi;
+ divu128(&lo, &tmp, hi);
+ lo = int128_gethi(int128_lshift(int128_make128(lo, 0), s));
+ } else { /* prevent overflow */
+ lo = u.lo;
+ tmp = u.hi - hi;
+ divu128(&lo, &tmp, hi);
+ lo = int128_gethi(int128_lshift(int128_make128(lo, 1), s));
+ }
+
+ qq = int128_make64(lo);
+
+ tmp = lo * v.hi;
+ mulu64(&lo, &hi, lo, v.lo);
+ hi += tmp;
+
+ if (hi < tmp /* quotient * divisor >= 2**128 > dividend */
+ || hi > u.hi /* quotient * divisor > dividend */
+ || (hi == u.hi && lo > u.lo)) {
+ qq.lo -= 1;
+ mulu64(&lo, &hi, qq.lo, v.lo);
+ hi += qq.lo * v.hi;
+ }
+
+ *q = qq;
+ u.hi -= hi + (u.lo < lo);
+ u.lo -= lo;
+ return u;
+ }
+}
+
+Int128 int128_divu(Int128 a, Int128 b)
+{
+ Int128 q;
+ divrem128(a, b, &q);
+ return q;
+}
+
+Int128 int128_remu(Int128 a, Int128 b)
+{
+ Int128 q;
+ return divrem128(a, b, &q);
+}
+
+Int128 int128_divs(Int128 a, Int128 b)
+{
+ Int128 q;
+ bool sgna = !int128_nonneg(a);
+ bool sgnb = !int128_nonneg(b);
+
+ if (sgna) {
+ a = int128_neg(a);
+ }
+
+ if (sgnb) {
+ b = int128_neg(b);
+ }
+
+ divrem128(a, b, &q);
+
+ if (sgna != sgnb) {
+ q = int128_neg(q);
+ }
+
+ return q;
+}
+
+Int128 int128_rems(Int128 a, Int128 b)
+{
+ Int128 q, r;
+ bool sgna = !int128_nonneg(a);
+ bool sgnb = !int128_nonneg(b);
+
+ if (sgna) {
+ a = int128_neg(a);
+ }
+
+ if (sgnb) {
+ b = int128_neg(b);
+ }
+
+ r = divrem128(a, b, &q);
+
+ if (sgna) {
+ r = int128_neg(r);
+ }
+
+ return r;
+}
+
+#endif
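The fallback divrem128() above backs the new Int128 division helpers on hosts without __int128 support. A hedged usage sketch, assuming the usual Int128 constructors from qemu/int128.h; int128_div_example is hypothetical and the numbers are illustrative: 2^68 divided by 10 gives quotient 0x1_9999_9999_9999_9999 and remainder 6, and the same results must come out of the __int128 and fallback paths:

#include "qemu/osdep.h"
#include "qemu/int128.h"

static void int128_div_example(void)
{
    Int128 u = int128_lshift(int128_one(), 68);   /* 2^68 */
    Int128 v = int128_make64(10);

    Int128 q = int128_divu(u, v);
    Int128 r = int128_remu(u, v);

    g_assert(int128_eq(q, int128_make128(0x9999999999999999ull, 0x1)));
    g_assert(int128_eq(r, int128_make64(6)));
}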
diff --git a/util/meson.build b/util/meson.build
index 05b593055a..e676b2f6c6 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -48,6 +48,7 @@ util_ss.add(files('transactions.c'))
util_ss.add(when: 'CONFIG_POSIX', if_true: files('drm.c'))
util_ss.add(files('guest-random.c'))
util_ss.add(files('yank.c'))
+util_ss.add(files('int128.c'))
if have_user
util_ss.add(files('selfmap.c'))
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index e8bdb02e1d..9efdc74bba 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -35,11 +35,13 @@
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "qemu/thread.h"
#include <libgen.h>
#include "qemu/cutils.h"
#include "qemu/compiler.h"
+#include "qemu/units.h"
#ifdef CONFIG_LINUX
#include <sys/syscall.h>
@@ -73,22 +75,32 @@
#define MAX_MEM_PREALLOC_THREAD_COUNT 16
+struct MemsetThread;
+
+typedef struct MemsetContext {
+ bool all_threads_created;
+ bool any_thread_failed;
+ struct MemsetThread *threads;
+ int num_threads;
+} MemsetContext;
+
struct MemsetThread {
char *addr;
size_t numpages;
size_t hpagesize;
QemuThread pgthread;
sigjmp_buf env;
+ MemsetContext *context;
};
typedef struct MemsetThread MemsetThread;
-static MemsetThread *memset_thread;
-static int memset_num_threads;
-static bool memset_thread_failed;
+/* used by sigbus_handler() */
+static MemsetContext *sigbus_memset_context;
+struct sigaction sigbus_oldact;
+static QemuMutex sigbus_mutex;
static QemuMutex page_mutex;
static QemuCond page_cond;
-static bool threads_created_flag;
int qemu_get_thread_id(void)
{
@@ -436,22 +448,50 @@ const char *qemu_get_exec_dir(void)
return exec_dir;
}
+#ifdef CONFIG_LINUX
+static void sigbus_handler(int signal, siginfo_t *siginfo, void *ctx)
+#else /* CONFIG_LINUX */
static void sigbus_handler(int signal)
+#endif /* CONFIG_LINUX */
{
int i;
- if (memset_thread) {
- for (i = 0; i < memset_num_threads; i++) {
- if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
- siglongjmp(memset_thread[i].env, 1);
+
+ if (sigbus_memset_context) {
+ for (i = 0; i < sigbus_memset_context->num_threads; i++) {
+ MemsetThread *thread = &sigbus_memset_context->threads[i];
+
+ if (qemu_thread_is_self(&thread->pgthread)) {
+ siglongjmp(thread->env, 1);
}
}
}
+
+#ifdef CONFIG_LINUX
+ /*
+ * We assume that the MCE SIGBUS handler could have been registered. We
+ * should never receive BUS_MCEERR_AO on any of our threads, but only on
+ * the main thread registered for PR_MCE_KILL_EARLY. Further, we should not
+ * receive BUS_MCEERR_AR triggered by action of other threads on one of
+ * our threads. So, no need to check for unrelated SIGBUS when seeing one
+ * for our threads.
+ *
+ * We will forward to the MCE handler, which will either handle the SIGBUS
+ * or reinstall the default SIGBUS handler and reraise the SIGBUS. The
+ * default SIGBUS handler will crash the process, so we don't care.
+ */
+ if (sigbus_oldact.sa_flags & SA_SIGINFO) {
+ sigbus_oldact.sa_sigaction(signal, siginfo, ctx);
+ return;
+ }
+#endif /* CONFIG_LINUX */
+ warn_report("os_mem_prealloc: unrelated SIGBUS detected and ignored");
}
static void *do_touch_pages(void *arg)
{
MemsetThread *memset_args = (MemsetThread *)arg;
sigset_t set, oldset;
+ int ret = 0;
/*
* On Linux, the page faults from the loop below can cause mmap_sem
@@ -459,7 +499,7 @@ static void *do_touch_pages(void *arg)
* clearing until all threads have been created.
*/
qemu_mutex_lock(&page_mutex);
- while(!threads_created_flag){
+ while (!memset_args->context->all_threads_created) {
qemu_cond_wait(&page_cond, &page_mutex);
}
qemu_mutex_unlock(&page_mutex);
@@ -470,7 +510,7 @@ static void *do_touch_pages(void *arg)
pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
if (sigsetjmp(memset_args->env, 1)) {
- memset_thread_failed = true;
+ ret = -EFAULT;
} else {
char *addr = memset_args->addr;
size_t numpages = memset_args->numpages;
@@ -484,20 +524,37 @@ static void *do_touch_pages(void *arg)
*
* 'volatile' to stop compiler optimizing this away
* to a no-op
- *
- * TODO: get a better solution from kernel so we
- * don't need to write at all so we don't cause
- * wear on the storage backing the region...
*/
*(volatile char *)addr = *addr;
addr += hpagesize;
}
}
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- return NULL;
+ return (void *)(uintptr_t)ret;
}
-static inline int get_memset_num_threads(int smp_cpus)
+static void *do_madv_populate_write_pages(void *arg)
+{
+ MemsetThread *memset_args = (MemsetThread *)arg;
+ const size_t size = memset_args->numpages * memset_args->hpagesize;
+ char * const addr = memset_args->addr;
+ int ret = 0;
+
+ /* See do_touch_pages(). */
+ qemu_mutex_lock(&page_mutex);
+ while (!memset_args->context->all_threads_created) {
+ qemu_cond_wait(&page_cond, &page_mutex);
+ }
+ qemu_mutex_unlock(&page_mutex);
+
+ if (size && qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE)) {
+ ret = -errno;
+ }
+ return (void *)(uintptr_t)ret;
+}
+
+static inline int get_memset_num_threads(size_t hpagesize, size_t numpages,
+ int smp_cpus)
{
long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
int ret = 1;
@@ -505,17 +562,27 @@ static inline int get_memset_num_threads(int smp_cpus)
if (host_procs > 0) {
ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
}
+
+ /* Especially with gigantic pages, don't create more threads than pages. */
+ ret = MIN(ret, numpages);
+ /* Don't start threads to prealloc comparatively little memory. */
+ ret = MIN(ret, MAX(1, hpagesize * numpages / (64 * MiB)));
+
/* In case sysconf() fails, we fall back to single threaded */
return ret;
}
-static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
- int smp_cpus)
+static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
+ int smp_cpus, bool use_madv_populate_write)
{
static gsize initialized = 0;
+ MemsetContext context = {
+ .num_threads = get_memset_num_threads(hpagesize, numpages, smp_cpus),
+ };
size_t numpages_per_thread, leftover;
+ void *(*touch_fn)(void *);
+ int ret = 0, i = 0;
char *addr = area;
- int i = 0;
if (g_once_init_enter(&initialized)) {
qemu_mutex_init(&page_mutex);
@@ -523,66 +590,121 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
g_once_init_leave(&initialized, 1);
}
- memset_thread_failed = false;
- threads_created_flag = false;
- memset_num_threads = get_memset_num_threads(smp_cpus);
- memset_thread = g_new0(MemsetThread, memset_num_threads);
- numpages_per_thread = numpages / memset_num_threads;
- leftover = numpages % memset_num_threads;
- for (i = 0; i < memset_num_threads; i++) {
- memset_thread[i].addr = addr;
- memset_thread[i].numpages = numpages_per_thread + (i < leftover);
- memset_thread[i].hpagesize = hpagesize;
- qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
- do_touch_pages, &memset_thread[i],
+ if (use_madv_populate_write) {
+ /* Avoid creating a single thread for MADV_POPULATE_WRITE */
+ if (context.num_threads == 1) {
+ if (qemu_madvise(area, hpagesize * numpages,
+ QEMU_MADV_POPULATE_WRITE)) {
+ return -errno;
+ }
+ return 0;
+ }
+ touch_fn = do_madv_populate_write_pages;
+ } else {
+ touch_fn = do_touch_pages;
+ }
+
+ context.threads = g_new0(MemsetThread, context.num_threads);
+ numpages_per_thread = numpages / context.num_threads;
+ leftover = numpages % context.num_threads;
+ for (i = 0; i < context.num_threads; i++) {
+ context.threads[i].addr = addr;
+ context.threads[i].numpages = numpages_per_thread + (i < leftover);
+ context.threads[i].hpagesize = hpagesize;
+ context.threads[i].context = &context;
+ qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+ touch_fn, &context.threads[i],
QEMU_THREAD_JOINABLE);
- addr += memset_thread[i].numpages * hpagesize;
+ addr += context.threads[i].numpages * hpagesize;
+ }
+
+ if (!use_madv_populate_write) {
+ sigbus_memset_context = &context;
}
qemu_mutex_lock(&page_mutex);
- threads_created_flag = true;
+ context.all_threads_created = true;
qemu_cond_broadcast(&page_cond);
qemu_mutex_unlock(&page_mutex);
- for (i = 0; i < memset_num_threads; i++) {
- qemu_thread_join(&memset_thread[i].pgthread);
+ for (i = 0; i < context.num_threads; i++) {
+ int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);
+
+ if (tmp) {
+ ret = tmp;
+ }
}
- g_free(memset_thread);
- memset_thread = NULL;
- return memset_thread_failed;
+ if (!use_madv_populate_write) {
+ sigbus_memset_context = NULL;
+ }
+ g_free(context.threads);
+
+ return ret;
+}
+
+static bool madv_populate_write_possible(char *area, size_t pagesize)
+{
+ return !qemu_madvise(area, pagesize, QEMU_MADV_POPULATE_WRITE) ||
+ errno != EINVAL;
}
void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
Error **errp)
{
+ static gsize initialized;
int ret;
- struct sigaction act, oldact;
size_t hpagesize = qemu_fd_getpagesize(fd);
size_t numpages = DIV_ROUND_UP(memory, hpagesize);
+ bool use_madv_populate_write;
+ struct sigaction act;
- memset(&act, 0, sizeof(act));
- act.sa_handler = &sigbus_handler;
- act.sa_flags = 0;
+ /*
+ * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for
+ * some special mappings, such as mapping /dev/mem.
+ */
+ use_madv_populate_write = madv_populate_write_possible(area, hpagesize);
- ret = sigaction(SIGBUS, &act, &oldact);
- if (ret) {
- error_setg_errno(errp, errno,
- "os_mem_prealloc: failed to install signal handler");
- return;
+ if (!use_madv_populate_write) {
+ if (g_once_init_enter(&initialized)) {
+ qemu_mutex_init(&sigbus_mutex);
+ g_once_init_leave(&initialized, 1);
+ }
+
+ qemu_mutex_lock(&sigbus_mutex);
+ memset(&act, 0, sizeof(act));
+#ifdef CONFIG_LINUX
+ act.sa_sigaction = &sigbus_handler;
+ act.sa_flags = SA_SIGINFO;
+#else /* CONFIG_LINUX */
+ act.sa_handler = &sigbus_handler;
+ act.sa_flags = 0;
+#endif /* CONFIG_LINUX */
+
+ ret = sigaction(SIGBUS, &act, &sigbus_oldact);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "os_mem_prealloc: failed to install signal handler");
+ return;
+ }
}
/* touch pages simultaneously */
- if (touch_all_pages(area, hpagesize, numpages, smp_cpus)) {
- error_setg(errp, "os_mem_prealloc: Insufficient free host memory "
- "pages available to allocate guest RAM");
+ ret = touch_all_pages(area, hpagesize, numpages, smp_cpus,
+ use_madv_populate_write);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "os_mem_prealloc: preallocating memory failed");
}
- ret = sigaction(SIGBUS, &oldact, NULL);
- if (ret) {
- /* Terminate QEMU since it can't recover from error */
- perror("os_mem_prealloc: failed to reinstall signal handler");
- exit(1);
+ if (!use_madv_populate_write) {
+ ret = sigaction(SIGBUS, &sigbus_oldact, NULL);
+ if (ret) {
+ /* Terminate QEMU since it can't recover from error */
+ perror("os_mem_prealloc: failed to reinstall signal handler");
+ exit(1);
+ }
+ qemu_mutex_unlock(&sigbus_mutex);
}
}
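The oslib-posix.c changes let os_mem_prealloc() prefer MADV_POPULATE_WRITE where the kernel and the mapping support it, falling back to the old touch-one-byte-per-page loop (now guarded by a per-context SIGBUS handler) otherwise, and they cap the worker pool so small regions are not split over many threads: for example, 1 GiB backed by 2 MiB pages is 512 pages and 1 GiB / 64 MiB = 16, so at most 16 threads are started. A condensed sketch of the decision flow; prealloc_sketch is hypothetical and the real logic is spread over os_mem_prealloc() and touch_all_pages() above:

static int prealloc_sketch(int fd, char *area, size_t memory, int smp_cpus)
{
    size_t hpagesize = qemu_fd_getpagesize(fd);
    size_t numpages  = DIV_ROUND_UP(memory, hpagesize);

    /* Probe on every call: some mappings (e.g. /dev/mem) reject it. */
    bool populate_ok = !qemu_madvise(area, hpagesize, QEMU_MADV_POPULATE_WRITE)
                       || errno != EINVAL;

    if (populate_ok) {
        /* the kernel prefaults the range; no SIGBUS handler is needed */
        if (qemu_madvise(area, hpagesize * numpages, QEMU_MADV_POPULATE_WRITE)) {
            return -errno;
        }
        return 0;
    }

    /*
     * Fallback: install the SIGBUS handler and touch one byte per page,
     * spread over at most min(host CPUs, 16, smp_cpus, numpages,
     * memory / 64 MiB) threads.
     */
    return touch_all_pages(area, hpagesize, numpages, smp_cpus, false);
}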